From 7ad920b504a980adcab4d3f6b85695526e6fd7bb Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:05 -0700 Subject: ceph: documentation Mount options, syntax. Signed-off-by: Sage Weil --- Documentation/filesystems/ceph.txt | 139 +++++++++++++++++++++++++++++++++++++ 1 file changed, 139 insertions(+) create mode 100644 Documentation/filesystems/ceph.txt diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt new file mode 100644 index 000000000000..6e03917316bd --- /dev/null +++ b/Documentation/filesystems/ceph.txt @@ -0,0 +1,139 @@ +Ceph Distributed File System +============================ + +Ceph is a distributed network file system designed to provide good +performance, reliability, and scalability. + +Basic features include: + + * POSIX semantics + * Seamless scaling from 1 to many thousands of nodes + * High availability and reliability. No single points of failure. + * N-way replication of data across storage nodes + * Fast recovery from node failures + * Automatic rebalancing of data on node addition/removal + * Easy deployment: most FS components are userspace daemons + +Also, + * Flexible snapshots (on any directory) + * Recursive accounting (nested files, directories, bytes) + +In contrast to cluster filesystems like GFS, OCFS2, and GPFS that rely +on symmetric access by all clients to shared block devices, Ceph +separates data and metadata management into independent server +clusters, similar to Lustre. Unlike Lustre, however, metadata and +storage nodes run entirely as user space daemons. Storage nodes +utilize btrfs to store data objects, leveraging its advanced features +(checksumming, metadata replication, etc.). File data is striped +across storage nodes in large chunks to distribute workload and +facilitate high throughputs. When storage nodes fail, data is +re-replicated in a distributed fashion by the storage nodes themselves +(with some minimal coordination from a cluster monitor), making the +system extremely efficient and scalable. + +Metadata servers effectively form a large, consistent, distributed +in-memory cache above the file namespace that is extremely scalable, +dynamically redistributes metadata in response to workload changes, +and can tolerate arbitrary (well, non-Byzantine) node failures. The +metadata server takes a somewhat unconventional approach to metadata +storage to significantly improve performance for common workloads. In +particular, inodes with only a single link are embedded in +directories, allowing entire directories of dentries and inodes to be +loaded into its cache with a single I/O operation. The contents of +extremely large directories can be fragmented and managed by +independent metadata servers, allowing scalable concurrent access. + +The system offers automatic data rebalancing/migration when scaling +from a small cluster of just a few nodes to many hundreds, without +requiring an administrator carve the data set into static volumes or +go through the tedious process of migrating data between servers. +When the file system approaches full, new nodes can be easily added +and things will "just work." + +Ceph includes flexible snapshot mechanism that allows a user to create +a snapshot on any subdirectory (and its nested contents) in the +system. Snapshot creation and deletion are as simple as 'mkdir +.snap/foo' and 'rmdir .snap/foo'. + +Ceph also provides some recursive accounting on directories for nested +files and bytes. 
That is, a 'getfattr -d foo' on any directory in the +system will reveal the total number of nested regular files and +subdirectories, and a summation of all nested file sizes. This makes +the identification of large disk space consumers relatively quick, as +no 'du' or similar recursive scan of the file system is required. + + +Mount Syntax +============ + +The basic mount syntax is: + + # mount -t ceph monip[:port][,monip2[:port]...]:/[subdir] mnt + +You only need to specify a single monitor, as the client will get the +full list when it connects. (However, if the monitor you specify +happens to be down, the mount won't succeed.) The port can be left +off if the monitor is using the default. So if the monitor is at +1.2.3.4, + + # mount -t ceph 1.2.3.4:/ /mnt/ceph + +is sufficient. If /sbin/mount.ceph is installed, a hostname can be +used instead of an IP address. + + + +Mount Options +============= + + ip=A.B.C.D[:N] + Specify the IP and/or port the client should bind to locally. + There is normally not much reason to do this. If the IP is not + specified, the client's IP address is determined by looking at the + address it's connection to the monitor originates from. + + wsize=X + Specify the maximum write size in bytes. By default there is no + maximu. Ceph will normally size writes based on the file stripe + size. + + rsize=X + Specify the maximum readahead. + + mount_timeout=X + Specify the timeout value for mount (in seconds), in the case + of a non-responsive Ceph file system. The default is 30 + seconds. + + rbytes + When stat() is called on a directory, set st_size to 'rbytes', + the summation of file sizes over all files nested beneath that + directory. This is the default. + + norbytes + When stat() is called on a directory, set st_size to the + number of entries in that directory. + + nocrc + Disable CRC32C calculation for data writes. If set, the OSD + must rely on TCP's error correction to detect data corruption + in the data payload. + + noasyncreaddir + Disable client's use its local cache to satisfy readdir + requests. (This does not change correctness; the client uses + cached metadata only when a lease or capability ensures it is + valid.) + + +More Information +================ + +For more information on Ceph, see the home page at + http://ceph.newdream.net/ + +The Linux kernel client source tree is available at + git://ceph.newdream.net/linux-ceph-client.git + +and the source for the full system is at + git://ceph.newdream.net/ceph.git -- cgit v1.2.3 From 0dee3c28af2fbe22ca62739a7f57da5435d35793 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:06 -0700 Subject: ceph: on-wire types These headers describe the types used to exchange messages between the Ceph client and various servers. All types are little-endian and packed. These headers are shared between the kernel and userspace, so all types are in terms of e.g. __u32. Additionally, we define a few magic values to identify the current version of the protocol(s) in use, so that discrepancies to be detected on mount. 
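Because every field is declared in wire (little-endian) form, consumers convert
at the point of use with the usual le*_to_cpu()/cpu_to_le*() helpers rather
than keeping native-endian copies. A minimal sketch of the intended usage,
assuming a hypothetical helper that copies a statfs reply into the native
struct kstatfs (the mapping itself is illustrative, not part of this patch):

    #include <linux/statfs.h>

    static void example_fill_kstatfs(const struct ceph_statfs *st,
                                     struct kstatfs *buf)
    {
            /* wire fields are __le64; convert as they are read */
            buf->f_blocks = le64_to_cpu(st->kb) >> 10;       /* KB -> 1 MB blocks */
            buf->f_bavail = le64_to_cpu(st->kb_avail) >> 10;
            buf->f_files  = le64_to_cpu(st->num_objects);
    }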
Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.c | 80 +++++++ fs/ceph/ceph_fs.h | 629 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/ceph_strings.c | 163 +++++++++++++ fs/ceph/msgr.h | 157 ++++++++++++ fs/ceph/rados.h | 372 +++++++++++++++++++++++++++++ 5 files changed, 1401 insertions(+) create mode 100644 fs/ceph/ceph_fs.c create mode 100644 fs/ceph/ceph_fs.h create mode 100644 fs/ceph/ceph_strings.c create mode 100644 fs/ceph/msgr.h create mode 100644 fs/ceph/rados.h diff --git a/fs/ceph/ceph_fs.c b/fs/ceph/ceph_fs.c new file mode 100644 index 000000000000..9371ff1c0002 --- /dev/null +++ b/fs/ceph/ceph_fs.c @@ -0,0 +1,80 @@ +/* + * Some non-inline ceph helpers + */ +#include "types.h" + +int ceph_flags_to_mode(int flags) +{ +#ifdef O_DIRECTORY /* fixme */ + if ((flags & O_DIRECTORY) == O_DIRECTORY) + return CEPH_FILE_MODE_PIN; +#endif +#ifdef O_LAZY + if (flags & O_LAZY) + return CEPH_FILE_MODE_LAZY; +#endif + if ((flags & O_APPEND) == O_APPEND) + flags |= O_WRONLY; + + flags &= O_ACCMODE; + if ((flags & O_RDWR) == O_RDWR) + return CEPH_FILE_MODE_RDWR; + if ((flags & O_WRONLY) == O_WRONLY) + return CEPH_FILE_MODE_WR; + return CEPH_FILE_MODE_RD; +} + +int ceph_caps_for_mode(int mode) +{ + switch (mode) { + case CEPH_FILE_MODE_PIN: + return CEPH_CAP_PIN; + case CEPH_FILE_MODE_RD: + return CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED | + CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE; + case CEPH_FILE_MODE_RDWR: + return CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED | + CEPH_CAP_FILE_EXCL | + CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE | + CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | + CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL | + CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL; + case CEPH_FILE_MODE_WR: + return CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED | + CEPH_CAP_FILE_EXCL | + CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | + CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL | + CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL; + } + return 0; +} + +/* Name hashing routines. Initial hash value */ +/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */ +#define ceph_init_name_hash() 0 + +/* partial hash update function. Assume roughly 4 bits per character */ +static unsigned long ceph_partial_name_hash(unsigned long c, + unsigned long prevhash) +{ + return (prevhash + (c << 4) + (c >> 4)) * 11; +} + +/* + * Finally: cut down the number of bits to a int value (and try to avoid + * losing bits) + */ +static unsigned long ceph_end_name_hash(unsigned long hash) +{ + return hash & 0xffffffff; +} + +/* Compute the hash for a name string. */ +unsigned int ceph_full_name_hash(const char *name, unsigned int len) +{ + unsigned long hash = ceph_init_name_hash(); + while (len--) + hash = ceph_partial_name_hash(*name++, hash); + return ceph_end_name_hash(hash); +} + diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h new file mode 100644 index 000000000000..21ed51b127f2 --- /dev/null +++ b/fs/ceph/ceph_fs.h @@ -0,0 +1,629 @@ +/* + * ceph_fs.h - Ceph constants and data types to share between kernel and + * user space. + * + * Most types in this file are defined as little-endian, and are + * primarily intended to describe data structures that pass over the + * wire or that are stored on disk. 
+ * + * LGPL2 + */ + +#ifndef _FS_CEPH_CEPH_FS_H +#define _FS_CEPH_CEPH_FS_H + +#include "msgr.h" +#include "rados.h" + +/* + * Ceph release version + */ +#define CEPH_VERSION_MAJOR 0 +#define CEPH_VERSION_MINOR 16 +#define CEPH_VERSION_PATCH 1 + +#define _CEPH_STRINGIFY(x) #x +#define CEPH_STRINGIFY(x) _CEPH_STRINGIFY(x) +#define CEPH_MAKE_VERSION(x, y, z) CEPH_STRINGIFY(x) "." CEPH_STRINGIFY(y) \ + "." CEPH_STRINGIFY(z) +#define CEPH_VERSION CEPH_MAKE_VERSION(CEPH_VERSION_MAJOR, \ + CEPH_VERSION_MINOR, CEPH_VERSION_PATCH) + +/* + * subprotocol versions. when specific messages types or high-level + * protocols change, bump the affected components. we keep rev + * internal cluster protocols separately from the public, + * client-facing protocol. + */ +#define CEPH_OSD_PROTOCOL 7 /* cluster internal */ +#define CEPH_MDS_PROTOCOL 9 /* cluster internal */ +#define CEPH_MON_PROTOCOL 4 /* cluster internal */ +#define CEPH_OSDC_PROTOCOL 20 /* server/client */ +#define CEPH_MDSC_PROTOCOL 29 /* server/client */ +#define CEPH_MONC_PROTOCOL 14 /* server/client */ + + +#define CEPH_INO_ROOT 1 + +/* arbitrary limit on max # of monitors (cluster of 3 is typical) */ +#define CEPH_MAX_MON 31 + + +unsigned int ceph_full_name_hash(const char *name, unsigned int len); + + +/* + * ceph_file_layout - describe data layout for a file/inode + */ +struct ceph_file_layout { + /* file -> object mapping */ + __le32 fl_stripe_unit; /* stripe unit, in bytes. must be multiple + of page size. */ + __le32 fl_stripe_count; /* over this many objects */ + __le32 fl_object_size; /* until objects are this big, then move to + new objects */ + __le32 fl_cas_hash; /* 0 = none; 1 = sha256 */ + + /* pg -> disk layout */ + __le32 fl_object_stripe_unit; /* for per-object parity, if any */ + + /* object -> pg layout */ + __le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */ + __le32 fl_pg_pool; /* namespace, crush ruleset, rep level */ +} __attribute__ ((packed)); + + + + +/********************************************* + * message layer + */ + +/* + * message types + */ + +/* misc */ +#define CEPH_MSG_SHUTDOWN 1 +#define CEPH_MSG_PING 2 + +/* client <-> monitor */ +#define CEPH_MSG_MON_MAP 4 +#define CEPH_MSG_MON_GET_MAP 5 +#define CEPH_MSG_CLIENT_MOUNT 10 +#define CEPH_MSG_CLIENT_MOUNT_ACK 11 +#define CEPH_MSG_STATFS 13 +#define CEPH_MSG_STATFS_REPLY 14 +#define CEPH_MSG_MON_SUBSCRIBE 15 +#define CEPH_MSG_MON_SUBSCRIBE_ACK 16 + +/* client <-> mds */ +#define CEPH_MSG_MDS_GETMAP 20 +#define CEPH_MSG_MDS_MAP 21 + +#define CEPH_MSG_CLIENT_SESSION 22 +#define CEPH_MSG_CLIENT_RECONNECT 23 + +#define CEPH_MSG_CLIENT_REQUEST 24 +#define CEPH_MSG_CLIENT_REQUEST_FORWARD 25 +#define CEPH_MSG_CLIENT_REPLY 26 +#define CEPH_MSG_CLIENT_CAPS 0x310 +#define CEPH_MSG_CLIENT_LEASE 0x311 +#define CEPH_MSG_CLIENT_SNAP 0x312 +#define CEPH_MSG_CLIENT_CAPRELEASE 0x313 + +/* osd */ +#define CEPH_MSG_OSD_GETMAP 40 +#define CEPH_MSG_OSD_MAP 41 +#define CEPH_MSG_OSD_OP 42 +#define CEPH_MSG_OSD_OPREPLY 43 + + +struct ceph_mon_statfs { + __le64 have_version; + struct ceph_fsid fsid; + __le64 tid; +} __attribute__ ((packed)); + +struct ceph_statfs { + __le64 kb, kb_used, kb_avail; + __le64 num_objects; +} __attribute__ ((packed)); + +struct ceph_mon_statfs_reply { + struct ceph_fsid fsid; + __le64 tid; + __le64 version; + struct ceph_statfs st; +} __attribute__ ((packed)); + +struct ceph_osd_getmap { + __le64 have_version; + struct ceph_fsid fsid; + __le32 start; +} __attribute__ ((packed)); + +struct ceph_mds_getmap { + __le64 have_version; + 
struct ceph_fsid fsid; +} __attribute__ ((packed)); + +struct ceph_client_mount { + __le64 have_version; +} __attribute__ ((packed)); + +struct ceph_mon_subscribe_item { + __le64 have; + __u8 onetime; +} __attribute__ ((packed)); + +/* + * mds states + * > 0 -> in + * <= 0 -> out + */ +#define CEPH_MDS_STATE_DNE 0 /* down, does not exist. */ +#define CEPH_MDS_STATE_STOPPED -1 /* down, once existed, but no subtrees. + empty log. */ +#define CEPH_MDS_STATE_BOOT -4 /* up, boot announcement. */ +#define CEPH_MDS_STATE_STANDBY -5 /* up, idle. waiting for assignment. */ +#define CEPH_MDS_STATE_CREATING -6 /* up, creating MDS instance. */ +#define CEPH_MDS_STATE_STARTING -7 /* up, starting previously stopped mds */ +#define CEPH_MDS_STATE_STANDBY_REPLAY -8 /* up, tailing active node's journal */ + +#define CEPH_MDS_STATE_REPLAY 8 /* up, replaying journal. */ +#define CEPH_MDS_STATE_RESOLVE 9 /* up, disambiguating distributed + operations (import, rename, etc.) */ +#define CEPH_MDS_STATE_RECONNECT 10 /* up, reconnect to clients */ +#define CEPH_MDS_STATE_REJOIN 11 /* up, rejoining distributed cache */ +#define CEPH_MDS_STATE_CLIENTREPLAY 12 /* up, replaying client operations */ +#define CEPH_MDS_STATE_ACTIVE 13 /* up, active */ +#define CEPH_MDS_STATE_STOPPING 14 /* up, but exporting metadata */ + +extern const char *ceph_mds_state_name(int s); + + +/* + * metadata lock types. + * - these are bitmasks.. we can compose them + * - they also define the lock ordering by the MDS + * - a few of these are internal to the mds + */ +#define CEPH_LOCK_DN 1 +#define CEPH_LOCK_ISNAP 2 +#define CEPH_LOCK_IVERSION 4 /* mds internal */ +#define CEPH_LOCK_IFILE 8 /* mds internal */ +#define CEPH_LOCK_IAUTH 32 +#define CEPH_LOCK_ILINK 64 +#define CEPH_LOCK_IDFT 128 /* dir frag tree */ +#define CEPH_LOCK_INEST 256 /* mds internal */ +#define CEPH_LOCK_IXATTR 512 +#define CEPH_LOCK_INO 2048 /* immutable inode bits; not a lock */ + +/* client_session ops */ +enum { + CEPH_SESSION_REQUEST_OPEN, + CEPH_SESSION_OPEN, + CEPH_SESSION_REQUEST_CLOSE, + CEPH_SESSION_CLOSE, + CEPH_SESSION_REQUEST_RENEWCAPS, + CEPH_SESSION_RENEWCAPS, + CEPH_SESSION_STALE, + CEPH_SESSION_RECALL_STATE, +}; + +extern const char *ceph_session_op_name(int op); + +struct ceph_mds_session_head { + __le32 op; + __le64 seq; + struct ceph_timespec stamp; + __le32 max_caps, max_leases; +} __attribute__ ((packed)); + +/* client_request */ +/* + * metadata ops. + * & 0x001000 -> write op + * & 0x010000 -> follow symlink (e.g. stat(), not lstat()). 
+ & & 0x100000 -> use weird ino/path trace + */ +#define CEPH_MDS_OP_WRITE 0x001000 +enum { + CEPH_MDS_OP_LOOKUP = 0x00100, + CEPH_MDS_OP_GETATTR = 0x00101, + CEPH_MDS_OP_LOOKUPHASH = 0x00102, + CEPH_MDS_OP_LOOKUPPARENT = 0x00103, + + CEPH_MDS_OP_SETXATTR = 0x01105, + CEPH_MDS_OP_RMXATTR = 0x01106, + CEPH_MDS_OP_SETLAYOUT = 0x01107, + CEPH_MDS_OP_SETATTR = 0x01108, + + CEPH_MDS_OP_MKNOD = 0x01201, + CEPH_MDS_OP_LINK = 0x01202, + CEPH_MDS_OP_UNLINK = 0x01203, + CEPH_MDS_OP_RENAME = 0x01204, + CEPH_MDS_OP_MKDIR = 0x01220, + CEPH_MDS_OP_RMDIR = 0x01221, + CEPH_MDS_OP_SYMLINK = 0x01222, + + CEPH_MDS_OP_CREATE = 0x00301, + CEPH_MDS_OP_OPEN = 0x00302, + CEPH_MDS_OP_READDIR = 0x00305, + + CEPH_MDS_OP_LOOKUPSNAP = 0x00400, + CEPH_MDS_OP_MKSNAP = 0x01400, + CEPH_MDS_OP_RMSNAP = 0x01401, + CEPH_MDS_OP_LSSNAP = 0x00402, +}; + +extern const char *ceph_mds_op_name(int op); + + +#define CEPH_SETATTR_MODE 1 +#define CEPH_SETATTR_UID 2 +#define CEPH_SETATTR_GID 4 +#define CEPH_SETATTR_MTIME 8 +#define CEPH_SETATTR_ATIME 16 +#define CEPH_SETATTR_SIZE 32 +#define CEPH_SETATTR_CTIME 64 + +union ceph_mds_request_args { + struct { + __le32 mask; /* CEPH_CAP_* */ + } __attribute__ ((packed)) getattr; + struct { + __le32 mode; + __le32 uid; + __le32 gid; + struct ceph_timespec mtime; + struct ceph_timespec atime; + __le64 size, old_size; /* old_size needed by truncate */ + __le32 mask; /* CEPH_SETATTR_* */ + } __attribute__ ((packed)) setattr; + struct { + __le32 frag; /* which dir fragment */ + __le32 max_entries; /* how many dentries to grab */ + } __attribute__ ((packed)) readdir; + struct { + __le32 mode; + __le32 rdev; + } __attribute__ ((packed)) mknod; + struct { + __le32 mode; + } __attribute__ ((packed)) mkdir; + struct { + __le32 flags; + __le32 mode; + __le32 stripe_unit; /* layout for newly created file */ + __le32 stripe_count; /* ... */ + __le32 object_size; + __le32 file_replication; + __le32 preferred; + } __attribute__ ((packed)) open; + struct { + __le32 flags; + } __attribute__ ((packed)) setxattr; + struct { + struct ceph_file_layout layout; + } __attribute__ ((packed)) setlayout; +} __attribute__ ((packed)); + +#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */ +#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */ + +struct ceph_mds_request_head { + __le64 tid, oldest_client_tid; + __le32 mdsmap_epoch; /* on client */ + __le32 flags; /* CEPH_MDS_FLAG_* */ + __u8 num_retry, num_fwd; /* count retry, fwd attempts */ + __le16 num_releases; /* # include cap/lease release records */ + __le32 op; /* mds op code */ + __le32 caller_uid, caller_gid; + __le64 ino; /* use this ino for openc, mkdir, mknod, + etc. (if replaying) */ + union ceph_mds_request_args args; +} __attribute__ ((packed)); + +/* cap/lease release record */ +struct ceph_mds_request_release { + __le64 ino, cap_id; /* ino and unique cap id */ + __le32 caps, wanted; /* new issued, wanted */ + __le32 seq, issue_seq, mseq; + __le32 dname_seq; /* if releasing a dentry lease, a */ + __le32 dname_len; /* string follows. */ +} __attribute__ ((packed)); + +/* client reply */ +struct ceph_mds_reply_head { + __le64 tid; + __le32 op; + __le32 result; + __le32 mdsmap_epoch; + __u8 safe; /* true if committed to disk */ + __u8 is_dentry, is_target; /* true if dentry, target inode records + are included with reply */ +} __attribute__ ((packed)); + +/* one for each node split */ +struct ceph_frag_tree_split { + __le32 frag; /* this frag splits... 
*/ + __le32 by; /* ...by this many bits */ +} __attribute__ ((packed)); + +struct ceph_frag_tree_head { + __le32 nsplits; /* num ceph_frag_tree_split records */ + struct ceph_frag_tree_split splits[]; +} __attribute__ ((packed)); + +/* capability issue, for bundling with mds reply */ +struct ceph_mds_reply_cap { + __le32 caps, wanted; /* caps issued, wanted */ + __le64 cap_id; + __le32 seq, mseq; + __le64 realm; /* snap realm */ + __u8 flags; /* CEPH_CAP_FLAG_* */ +} __attribute__ ((packed)); + +#define CEPH_CAP_FLAG_AUTH 1 /* cap is issued by auth mds */ + +/* inode record, for bundling with mds reply */ +struct ceph_mds_reply_inode { + __le64 ino; + __le64 snapid; + __le32 rdev; + __le64 version; /* inode version */ + __le64 xattr_version; /* version for xattr blob */ + struct ceph_mds_reply_cap cap; /* caps issued for this inode */ + struct ceph_file_layout layout; + struct ceph_timespec ctime, mtime, atime; + __le32 time_warp_seq; + __le64 size, max_size, truncate_size; + __le32 truncate_seq; + __le32 mode, uid, gid; + __le32 nlink; + __le64 files, subdirs, rbytes, rfiles, rsubdirs; /* dir stats */ + struct ceph_timespec rctime; + struct ceph_frag_tree_head fragtree; /* (must be at end of struct) */ +} __attribute__ ((packed)); +/* followed by frag array, then symlink string, then xattr blob */ + +/* reply_lease follows dname, and reply_inode */ +struct ceph_mds_reply_lease { + __le16 mask; /* lease type(s) */ + __le32 duration_ms; /* lease duration */ + __le32 seq; +} __attribute__ ((packed)); + +struct ceph_mds_reply_dirfrag { + __le32 frag; /* fragment */ + __le32 auth; /* auth mds, if this is a delegation point */ + __le32 ndist; /* number of mds' this is replicated on */ + __le32 dist[]; +} __attribute__ ((packed)); + +/* file access modes */ +#define CEPH_FILE_MODE_PIN 0 +#define CEPH_FILE_MODE_RD 1 +#define CEPH_FILE_MODE_WR 2 +#define CEPH_FILE_MODE_RDWR 3 /* RD | WR */ +#define CEPH_FILE_MODE_LAZY 4 /* lazy io */ +#define CEPH_FILE_MODE_NUM 8 /* bc these are bit fields.. 
mostly */ + +int ceph_flags_to_mode(int flags); + + +/* capability bits */ +#define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */ + +/* generic cap bits */ +#define CEPH_CAP_GSHARED 1 /* client can reads */ +#define CEPH_CAP_GEXCL 2 /* client can read and update */ +#define CEPH_CAP_GCACHE 4 /* (file) client can cache reads */ +#define CEPH_CAP_GRD 8 /* (file) client can read */ +#define CEPH_CAP_GWR 16 /* (file) client can write */ +#define CEPH_CAP_GBUFFER 32 /* (file) client can buffer writes */ +#define CEPH_CAP_GWREXTEND 64 /* (file) client can extend EOF */ +#define CEPH_CAP_GLAZYIO 128 /* (file) client can perform lazy io */ + +/* per-lock shift */ +#define CEPH_CAP_SAUTH 2 +#define CEPH_CAP_SLINK 4 +#define CEPH_CAP_SXATTR 6 +#define CEPH_CAP_SFILE 8 /* goes at the end (uses >2 cap bits) */ + +#define CEPH_CAP_BITS 16 + +/* composed values */ +#define CEPH_CAP_AUTH_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SAUTH) +#define CEPH_CAP_AUTH_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SAUTH) +#define CEPH_CAP_LINK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SLINK) +#define CEPH_CAP_LINK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SLINK) +#define CEPH_CAP_XATTR_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SXATTR) +#define CEPH_CAP_XATTR_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SXATTR) +#define CEPH_CAP_FILE(x) (x << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_CACHE (CEPH_CAP_GCACHE << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_RD (CEPH_CAP_GRD << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_WR (CEPH_CAP_GWR << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_BUFFER (CEPH_CAP_GBUFFER << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_WREXTEND (CEPH_CAP_GWREXTEND << CEPH_CAP_SFILE) +#define CEPH_CAP_FILE_LAZYIO (CEPH_CAP_GLAZYIO << CEPH_CAP_SFILE) + +/* cap masks (for getattr) */ +#define CEPH_STAT_CAP_INODE CEPH_CAP_PIN +#define CEPH_STAT_CAP_TYPE CEPH_CAP_PIN /* mode >> 12 */ +#define CEPH_STAT_CAP_SYMLINK CEPH_CAP_PIN +#define CEPH_STAT_CAP_UID CEPH_CAP_AUTH_SHARED +#define CEPH_STAT_CAP_GID CEPH_CAP_AUTH_SHARED +#define CEPH_STAT_CAP_MODE CEPH_CAP_AUTH_SHARED +#define CEPH_STAT_CAP_NLINK CEPH_CAP_LINK_SHARED +#define CEPH_STAT_CAP_LAYOUT CEPH_CAP_FILE_SHARED +#define CEPH_STAT_CAP_MTIME CEPH_CAP_FILE_SHARED +#define CEPH_STAT_CAP_SIZE CEPH_CAP_FILE_SHARED +#define CEPH_STAT_CAP_ATIME CEPH_CAP_FILE_SHARED /* fixme */ +#define CEPH_STAT_CAP_XATTR CEPH_CAP_XATTR_SHARED +#define CEPH_STAT_CAP_INODE_ALL (CEPH_CAP_PIN | \ + CEPH_CAP_AUTH_SHARED | \ + CEPH_CAP_LINK_SHARED | \ + CEPH_CAP_FILE_SHARED | \ + CEPH_CAP_XATTR_SHARED) + +#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \ + CEPH_CAP_LINK_SHARED | \ + CEPH_CAP_XATTR_SHARED | \ + CEPH_CAP_FILE_SHARED) +#define CEPH_CAP_ANY_RD (CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_RD | \ + CEPH_CAP_FILE_CACHE) + +#define CEPH_CAP_ANY_EXCL (CEPH_CAP_AUTH_EXCL | \ + CEPH_CAP_LINK_EXCL | \ + CEPH_CAP_XATTR_EXCL | \ + CEPH_CAP_FILE_EXCL) +#define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | \ + CEPH_CAP_FILE_EXCL) +#define CEPH_CAP_ANY_WR (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR) +#define CEPH_CAP_ANY (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \ + CEPH_CAP_ANY_FILE_WR | CEPH_CAP_PIN) + +#define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \ + CEPH_LOCK_IXATTR) + +int ceph_caps_for_mode(int mode); + +enum { + CEPH_CAP_OP_GRANT, /* mds->client grant */ + CEPH_CAP_OP_REVOKE, /* mds->client revoke */ + CEPH_CAP_OP_TRUNC, /* mds->client trunc notify */ + 
CEPH_CAP_OP_EXPORT, /* mds has exported the cap */ + CEPH_CAP_OP_IMPORT, /* mds has imported the cap */ + CEPH_CAP_OP_UPDATE, /* client->mds update */ + CEPH_CAP_OP_DROP, /* client->mds drop cap bits */ + CEPH_CAP_OP_FLUSH, /* client->mds cap writeback */ + CEPH_CAP_OP_FLUSH_ACK, /* mds->client flushed */ + CEPH_CAP_OP_FLUSHSNAP, /* client->mds flush snapped metadata */ + CEPH_CAP_OP_FLUSHSNAP_ACK, /* mds->client flushed snapped metadata */ + CEPH_CAP_OP_RELEASE, /* client->mds release (clean) cap */ + CEPH_CAP_OP_RENEW, /* client->mds renewal request */ +}; + +extern const char *ceph_cap_op_name(int op); + +/* + * caps message, used for capability callbacks, acks, requests, etc. + */ +struct ceph_mds_caps { + __le32 op; /* CEPH_CAP_OP_* */ + __le64 ino, realm; + __le64 cap_id; + __le32 seq, issue_seq; + __le32 caps, wanted, dirty; /* latest issued/wanted/dirty */ + __le32 migrate_seq; + __le64 snap_follows; + __le32 snap_trace_len; + __le64 client_tid; /* for FLUSH(SNAP) -> FLUSH(SNAP)_ACK */ + + /* authlock */ + __le32 uid, gid, mode; + + /* linklock */ + __le32 nlink; + + /* xattrlock */ + __le32 xattr_len; + __le64 xattr_version; + + /* filelock */ + __le64 size, max_size, truncate_size; + __le32 truncate_seq; + struct ceph_timespec mtime, atime, ctime; + struct ceph_file_layout layout; + __le32 time_warp_seq; +} __attribute__ ((packed)); + +/* cap release msg head */ +struct ceph_mds_cap_release { + __le32 num; /* number of cap_items that follow */ +} __attribute__ ((packed)); + +struct ceph_mds_cap_item { + __le64 ino; + __le64 cap_id; + __le32 migrate_seq, seq; +} __attribute__ ((packed)); + +#define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */ +#define CEPH_MDS_LEASE_RELEASE 2 /* client -> mds */ +#define CEPH_MDS_LEASE_RENEW 3 /* client <-> mds */ +#define CEPH_MDS_LEASE_REVOKE_ACK 4 /* client -> mds */ + +extern const char *ceph_lease_op_name(int o); + +/* lease msg header */ +struct ceph_mds_lease { + __u8 action; /* CEPH_MDS_LEASE_* */ + __le16 mask; /* which lease */ + __le64 ino; + __le64 first, last; /* snap range */ + __le32 seq; + __le32 duration_ms; /* duration of renewal */ +} __attribute__ ((packed)); +/* followed by a __le32+string for dname */ + +/* client reconnect */ +struct ceph_mds_cap_reconnect { + __le64 cap_id; + __le32 wanted; + __le32 issued; + __le64 size; + struct ceph_timespec mtime, atime; + __le64 snaprealm; + __le64 pathbase; /* base ino for our path to this ino */ +} __attribute__ ((packed)); +/* followed by encoded string */ + +struct ceph_mds_snaprealm_reconnect { + __le64 ino; /* snap realm base */ + __le64 seq; /* snap seq for this snap realm */ + __le64 parent; /* parent realm */ +} __attribute__ ((packed)); + +/* + * snaps + */ +enum { + CEPH_SNAP_OP_UPDATE, /* CREATE or DESTROY */ + CEPH_SNAP_OP_CREATE, + CEPH_SNAP_OP_DESTROY, + CEPH_SNAP_OP_SPLIT, +}; + +extern const char *ceph_snap_op_name(int o); + +/* snap msg header */ +struct ceph_mds_snap_head { + __le32 op; /* CEPH_SNAP_OP_* */ + __le64 split; /* ino to split off, if any */ + __le32 num_split_inos; /* # inos belonging to new child realm */ + __le32 num_split_realms; /* # child realms udner new child realm */ + __le32 trace_len; /* size of snap trace blob */ +} __attribute__ ((packed)); +/* followed by split ino list, then split realms, then the trace blob */ + +/* + * encode info about a snaprealm, as viewed by a client + */ +struct ceph_mds_snap_realm { + __le64 ino; /* ino */ + __le64 created; /* snap: when created */ + __le64 parent; /* ino: parent realm */ + __le64 parent_since; /* 
snap: same parent since */ + __le64 seq; /* snap: version */ + __le32 num_snaps; + __le32 num_prior_parent_snaps; +} __attribute__ ((packed)); +/* followed by my snap list, then prior parent snap list */ + +#endif diff --git a/fs/ceph/ceph_strings.c b/fs/ceph/ceph_strings.c new file mode 100644 index 000000000000..90d19d9d8d8f --- /dev/null +++ b/fs/ceph/ceph_strings.c @@ -0,0 +1,163 @@ +/* + * Ceph string constants + */ +#include "types.h" + +const char *ceph_osd_op_name(int op) +{ + switch (op) { + case CEPH_OSD_OP_READ: return "read"; + case CEPH_OSD_OP_STAT: return "stat"; + + case CEPH_OSD_OP_MASKTRUNC: return "masktrunc"; + + case CEPH_OSD_OP_WRITE: return "write"; + case CEPH_OSD_OP_DELETE: return "delete"; + case CEPH_OSD_OP_TRUNCATE: return "truncate"; + case CEPH_OSD_OP_ZERO: return "zero"; + case CEPH_OSD_OP_WRITEFULL: return "writefull"; + + case CEPH_OSD_OP_APPEND: return "append"; + case CEPH_OSD_OP_STARTSYNC: return "startsync"; + case CEPH_OSD_OP_SETTRUNC: return "settrunc"; + case CEPH_OSD_OP_TRIMTRUNC: return "trimtrunc"; + + case CEPH_OSD_OP_TMAPUP: return "tmapup"; + case CEPH_OSD_OP_TMAPGET: return "tmapget"; + case CEPH_OSD_OP_TMAPPUT: return "tmapput"; + + case CEPH_OSD_OP_GETXATTR: return "getxattr"; + case CEPH_OSD_OP_GETXATTRS: return "getxattrs"; + case CEPH_OSD_OP_SETXATTR: return "setxattr"; + case CEPH_OSD_OP_SETXATTRS: return "setxattrs"; + case CEPH_OSD_OP_RESETXATTRS: return "resetxattrs"; + case CEPH_OSD_OP_RMXATTR: return "rmxattr"; + + case CEPH_OSD_OP_PULL: return "pull"; + case CEPH_OSD_OP_PUSH: return "push"; + case CEPH_OSD_OP_BALANCEREADS: return "balance-reads"; + case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads"; + case CEPH_OSD_OP_SCRUB: return "scrub"; + + case CEPH_OSD_OP_WRLOCK: return "wrlock"; + case CEPH_OSD_OP_WRUNLOCK: return "wrunlock"; + case CEPH_OSD_OP_RDLOCK: return "rdlock"; + case CEPH_OSD_OP_RDUNLOCK: return "rdunlock"; + case CEPH_OSD_OP_UPLOCK: return "uplock"; + case CEPH_OSD_OP_DNLOCK: return "dnlock"; + + case CEPH_OSD_OP_CALL: return "call"; + + case CEPH_OSD_OP_PGLS: return "pgls"; + } + return "???"; +} + +const char *ceph_mds_state_name(int s) +{ + switch (s) { + /* down and out */ + case CEPH_MDS_STATE_DNE: return "down:dne"; + case CEPH_MDS_STATE_STOPPED: return "down:stopped"; + /* up and out */ + case CEPH_MDS_STATE_BOOT: return "up:boot"; + case CEPH_MDS_STATE_STANDBY: return "up:standby"; + case CEPH_MDS_STATE_STANDBY_REPLAY: return "up:standby-replay"; + case CEPH_MDS_STATE_CREATING: return "up:creating"; + case CEPH_MDS_STATE_STARTING: return "up:starting"; + /* up and in */ + case CEPH_MDS_STATE_REPLAY: return "up:replay"; + case CEPH_MDS_STATE_RESOLVE: return "up:resolve"; + case CEPH_MDS_STATE_RECONNECT: return "up:reconnect"; + case CEPH_MDS_STATE_REJOIN: return "up:rejoin"; + case CEPH_MDS_STATE_CLIENTREPLAY: return "up:clientreplay"; + case CEPH_MDS_STATE_ACTIVE: return "up:active"; + case CEPH_MDS_STATE_STOPPING: return "up:stopping"; + } + return "???"; +} + +const char *ceph_session_op_name(int op) +{ + switch (op) { + case CEPH_SESSION_REQUEST_OPEN: return "request_open"; + case CEPH_SESSION_OPEN: return "open"; + case CEPH_SESSION_REQUEST_CLOSE: return "request_close"; + case CEPH_SESSION_CLOSE: return "close"; + case CEPH_SESSION_REQUEST_RENEWCAPS: return "request_renewcaps"; + case CEPH_SESSION_RENEWCAPS: return "renewcaps"; + case CEPH_SESSION_STALE: return "stale"; + case CEPH_SESSION_RECALL_STATE: return "recall_state"; + } + return "???"; +} + +const char *ceph_mds_op_name(int op) +{ + 
switch (op) { + case CEPH_MDS_OP_LOOKUP: return "lookup"; + case CEPH_MDS_OP_LOOKUPHASH: return "lookuphash"; + case CEPH_MDS_OP_LOOKUPPARENT: return "lookupparent"; + case CEPH_MDS_OP_GETATTR: return "getattr"; + case CEPH_MDS_OP_SETXATTR: return "setxattr"; + case CEPH_MDS_OP_SETATTR: return "setattr"; + case CEPH_MDS_OP_RMXATTR: return "rmxattr"; + case CEPH_MDS_OP_READDIR: return "readdir"; + case CEPH_MDS_OP_MKNOD: return "mknod"; + case CEPH_MDS_OP_LINK: return "link"; + case CEPH_MDS_OP_UNLINK: return "unlink"; + case CEPH_MDS_OP_RENAME: return "rename"; + case CEPH_MDS_OP_MKDIR: return "mkdir"; + case CEPH_MDS_OP_RMDIR: return "rmdir"; + case CEPH_MDS_OP_SYMLINK: return "symlink"; + case CEPH_MDS_OP_CREATE: return "create"; + case CEPH_MDS_OP_OPEN: return "open"; + case CEPH_MDS_OP_LOOKUPSNAP: return "lookupsnap"; + case CEPH_MDS_OP_LSSNAP: return "lssnap"; + case CEPH_MDS_OP_MKSNAP: return "mksnap"; + case CEPH_MDS_OP_RMSNAP: return "rmsnap"; + } + return "???"; +} + +const char *ceph_cap_op_name(int op) +{ + switch (op) { + case CEPH_CAP_OP_GRANT: return "grant"; + case CEPH_CAP_OP_REVOKE: return "revoke"; + case CEPH_CAP_OP_TRUNC: return "trunc"; + case CEPH_CAP_OP_EXPORT: return "export"; + case CEPH_CAP_OP_IMPORT: return "import"; + case CEPH_CAP_OP_UPDATE: return "update"; + case CEPH_CAP_OP_DROP: return "drop"; + case CEPH_CAP_OP_FLUSH: return "flush"; + case CEPH_CAP_OP_FLUSH_ACK: return "flush_ack"; + case CEPH_CAP_OP_FLUSHSNAP: return "flushsnap"; + case CEPH_CAP_OP_FLUSHSNAP_ACK: return "flushsnap_ack"; + case CEPH_CAP_OP_RELEASE: return "release"; + case CEPH_CAP_OP_RENEW: return "renew"; + } + return "???"; +} + +const char *ceph_lease_op_name(int o) +{ + switch (o) { + case CEPH_MDS_LEASE_REVOKE: return "revoke"; + case CEPH_MDS_LEASE_RELEASE: return "release"; + case CEPH_MDS_LEASE_RENEW: return "renew"; + case CEPH_MDS_LEASE_REVOKE_ACK: return "revoke_ack"; + } + return "???"; +} + +const char *ceph_snap_op_name(int o) +{ + switch (o) { + case CEPH_SNAP_OP_UPDATE: return "update"; + case CEPH_SNAP_OP_CREATE: return "create"; + case CEPH_SNAP_OP_DESTROY: return "destroy"; + case CEPH_SNAP_OP_SPLIT: return "split"; + } + return "???"; +} diff --git a/fs/ceph/msgr.h b/fs/ceph/msgr.h new file mode 100644 index 000000000000..73921ae43faa --- /dev/null +++ b/fs/ceph/msgr.h @@ -0,0 +1,157 @@ +#ifndef __MSGR_H +#define __MSGR_H + +/* + * Data types for message passing layer used by Ceph. + */ + +#define CEPH_MON_PORT 6789 /* default monitor port */ + +/* + * client-side processes will try to bind to ports in this + * range, simply for the benefit of tools like nmap or wireshark + * that would like to identify the protocol. + */ +#define CEPH_PORT_FIRST 6789 +#define CEPH_PORT_START 6800 /* non-monitors start here */ +#define CEPH_PORT_LAST 6900 + +/* + * tcp connection banner. include a protocol version. and adjust + * whenever the wire protocol changes. try to keep this string length + * constant. + */ +#define CEPH_BANNER "ceph v021" +#define CEPH_BANNER_MAX_LEN 30 + + +/* + * Rollover-safe type and comparator for 32-bit sequence numbers. + * Comparator returns -1, 0, or 1. + */ +typedef __u32 ceph_seq_t; + +static inline __s32 ceph_seq_cmp(__u32 a, __u32 b) +{ + return (__s32)a - (__s32)b; +} + + +/* + * entity_name -- logical name for a process participating in the + * network, e.g. 'mds0' or 'osd3'. 
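+ * ('osd3', for instance, corresponds to type CEPH_ENTITY_TYPE_OSD with
+ * num 3.)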
+ */ +struct ceph_entity_name { + __u8 type; /* CEPH_ENTITY_TYPE_* */ + __le64 num; +} __attribute__ ((packed)); + +#define CEPH_ENTITY_TYPE_MON 1 +#define CEPH_ENTITY_TYPE_MDS 2 +#define CEPH_ENTITY_TYPE_OSD 3 +#define CEPH_ENTITY_TYPE_CLIENT 4 +#define CEPH_ENTITY_TYPE_ADMIN 5 + +/* + * entity_addr -- network address + */ +struct ceph_entity_addr { + __le32 erank; /* entity's rank in process */ + __le32 nonce; /* unique id for process (e.g. pid) */ + struct sockaddr_storage in_addr; +} __attribute__ ((packed)); + +static inline bool ceph_entity_addr_is_local(const struct ceph_entity_addr *a, + const struct ceph_entity_addr *b) +{ + return a->nonce == b->nonce && + memcmp(&a->in_addr, &b->in_addr, sizeof(a->in_addr)) == 0; +} + +static inline bool ceph_entity_addr_equal(const struct ceph_entity_addr *a, + const struct ceph_entity_addr *b) +{ + return memcmp(a, b, sizeof(*a)) == 0; +} + +struct ceph_entity_inst { + struct ceph_entity_name name; + struct ceph_entity_addr addr; +} __attribute__ ((packed)); + + +/* used by message exchange protocol */ +#define CEPH_MSGR_TAG_READY 1 /* server->client: ready for messages */ +#define CEPH_MSGR_TAG_RESETSESSION 2 /* server->client: reset, try again */ +#define CEPH_MSGR_TAG_WAIT 3 /* server->client: wait for racing + incoming connection */ +#define CEPH_MSGR_TAG_RETRY_SESSION 4 /* server->client + cseq: try again + with higher cseq */ +#define CEPH_MSGR_TAG_RETRY_GLOBAL 5 /* server->client + gseq: try again + with higher gseq */ +#define CEPH_MSGR_TAG_CLOSE 6 /* closing pipe */ +#define CEPH_MSGR_TAG_MSG 7 /* message */ +#define CEPH_MSGR_TAG_ACK 8 /* message ack */ +#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */ +#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */ + + +/* + * connection negotiation + */ +struct ceph_msg_connect { + __le32 host_type; /* CEPH_ENTITY_TYPE_* */ + __le32 global_seq; /* count connections initiated by this host */ + __le32 connect_seq; /* count connections initiated in this session */ + __le32 protocol_version; + __u8 flags; /* CEPH_MSG_CONNECT_* */ +} __attribute__ ((packed)); + +struct ceph_msg_connect_reply { + __u8 tag; + __le32 global_seq; + __le32 connect_seq; + __le32 protocol_version; + __u8 flags; +} __attribute__ ((packed)); + +#define CEPH_MSG_CONNECT_LOSSY 1 /* messages i send may be safely dropped */ + + +/* + * message header + */ +struct ceph_msg_header { + __le64 seq; /* message seq# for this session */ + __le16 type; /* message type */ + __le16 priority; /* priority. 
higher value == higher priority */ + + __le32 front_len; /* bytes in main payload */ + __le32 middle_len;/* bytes in middle payload */ + __le32 data_len; /* bytes of data payload */ + __le16 data_off; /* sender: include full offset; + receiver: mask against ~PAGE_MASK */ + + struct ceph_entity_inst src, orig_src; + __le32 dst_erank; + __le32 crc; /* header crc32c */ +} __attribute__ ((packed)); + +#define CEPH_MSG_PRIO_LOW 64 +#define CEPH_MSG_PRIO_DEFAULT 127 +#define CEPH_MSG_PRIO_HIGH 196 +#define CEPH_MSG_PRIO_HIGHEST 255 + +/* + * follows data payload + */ +struct ceph_msg_footer { + __le32 front_crc, middle_crc, data_crc; + __u8 flags; +} __attribute__ ((packed)); + +#define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */ +#define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */ + + +#endif diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h new file mode 100644 index 000000000000..a48cf4ae391e --- /dev/null +++ b/fs/ceph/rados.h @@ -0,0 +1,372 @@ +#ifndef __RADOS_H +#define __RADOS_H + +/* + * Data types for the Ceph distributed object storage layer RADOS + * (Reliable Autonomic Distributed Object Store). + */ + +#include "msgr.h" + +/* + * fs id + */ +struct ceph_fsid { + unsigned char fsid[16]; +}; + +static inline int ceph_fsid_compare(const struct ceph_fsid *a, + const struct ceph_fsid *b) +{ + return memcmp(a, b, sizeof(*a)); +} + +/* + * ino, object, etc. + */ +typedef __le64 ceph_snapid_t; +#define CEPH_SNAPDIR ((__u64)(-1)) /* reserved for hidden .snap dir */ +#define CEPH_NOSNAP ((__u64)(-2)) /* "head", "live" revision */ +#define CEPH_MAXSNAP ((__u64)(-3)) /* largest valid snapid */ + +struct ceph_timespec { + __le32 tv_sec; + __le32 tv_nsec; +} __attribute__ ((packed)); + + +/* + * object layout - how objects are mapped into PGs + */ +#define CEPH_OBJECT_LAYOUT_HASH 1 +#define CEPH_OBJECT_LAYOUT_LINEAR 2 +#define CEPH_OBJECT_LAYOUT_HASHINO 3 + +/* + * pg layout -- how PGs are mapped onto (sets of) OSDs + */ +#define CEPH_PG_LAYOUT_CRUSH 0 +#define CEPH_PG_LAYOUT_HASH 1 +#define CEPH_PG_LAYOUT_LINEAR 2 +#define CEPH_PG_LAYOUT_HYBRID 3 + + +/* + * placement group. + * we encode this into one __le64. + */ +union ceph_pg { + __u64 pg64; + struct { + __s16 preferred; /* preferred primary osd */ + __u16 ps; /* placement seed */ + __u32 pool; /* object pool */ + } __attribute__ ((packed)) pg; +} __attribute__ ((packed)); + +/* + * pg_pool is a set of pgs storing a pool of objects + * + * pg_num -- base number of pseudorandomly placed pgs + * + * pgp_num -- effective number when calculating pg placement. this + * is used for pg_num increases. new pgs result in data being "split" + * into new pgs. for this to proceed smoothly, new pgs are intiially + * colocated with their parents; that is, pgp_num doesn't increase + * until the new pgs have successfully split. only _then_ are the new + * pgs placed independently. + * + * lpg_num -- localized pg count (per device). replicas are randomly + * selected. + * + * lpgp_num -- as above. 
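+ *
+ * (For example, doubling pg_num immediately splits each existing pg in
+ * place; raising pgp_num to match afterwards is what allows the new
+ * halves to be placed independently.)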
+ */ +#define CEPH_PG_TYPE_REP 1 +#define CEPH_PG_TYPE_RAID4 2 +struct ceph_pg_pool { + __u8 type; /* CEPH_PG_TYPE_* */ + __u8 size; /* number of osds in each pg */ + __u8 crush_ruleset; /* crush placement rule */ + __le32 pg_num, pgp_num; /* number of pg's */ + __le32 lpg_num, lpgp_num; /* number of localized pg's */ + __le32 last_change; /* most recent epoch changed */ + __le64 snap_seq; /* seq for per-pool snapshot */ + __le32 snap_epoch; /* epoch of last snap */ + __le32 num_snaps; + __le32 num_removed_snap_intervals; +} __attribute__ ((packed)); + +/* + * stable_mod func is used to control number of placement groups. + * similar to straight-up modulo, but produces a stable mapping as b + * increases over time. b is the number of bins, and bmask is the + * containing power of 2 minus 1. + * + * b <= bmask and bmask=(2**n)-1 + * e.g., b=12 -> bmask=15, b=123 -> bmask=127 + */ +static inline int ceph_stable_mod(int x, int b, int bmask) +{ + if ((x & bmask) < b) + return x & bmask; + else + return x & (bmask >> 1); +} + +/* + * object layout - how a given object should be stored. + */ +struct ceph_object_layout { + __le64 ol_pgid; /* raw pg, with _full_ ps precision. */ + __le32 ol_stripe_unit; /* for per-object parity, if any */ +} __attribute__ ((packed)); + +/* + * compound epoch+version, used by storage layer to serialize mutations + */ +struct ceph_eversion { + __le32 epoch; + __le64 version; +} __attribute__ ((packed)); + +/* + * osd map bits + */ + +/* status bits */ +#define CEPH_OSD_EXISTS 1 +#define CEPH_OSD_UP 2 + +/* osd weights. fixed point value: 0x10000 == 1.0 ("in"), 0 == "out" */ +#define CEPH_OSD_IN 0x10000 +#define CEPH_OSD_OUT 0 + + +/* + * osd map flag bits + */ +#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */ +#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */ +#define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */ +#define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */ +#define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */ + +/* + * osd ops + */ +#define CEPH_OSD_OP_MODE 0xf000 +#define CEPH_OSD_OP_MODE_RD 0x1000 +#define CEPH_OSD_OP_MODE_WR 0x2000 +#define CEPH_OSD_OP_MODE_RMW 0x3000 +#define CEPH_OSD_OP_MODE_SUB 0x4000 +#define CEPH_OSD_OP_MODE_EXEC 0x8000 + +#define CEPH_OSD_OP_TYPE 0x0f00 +#define CEPH_OSD_OP_TYPE_LOCK 0x0100 +#define CEPH_OSD_OP_TYPE_DATA 0x0200 +#define CEPH_OSD_OP_TYPE_ATTR 0x0300 +#define CEPH_OSD_OP_TYPE_EXEC 0x0400 +#define CEPH_OSD_OP_TYPE_PG 0x0500 + +enum { + /** data **/ + /* read */ + CEPH_OSD_OP_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 1, + CEPH_OSD_OP_STAT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 2, + + /* fancy read */ + CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4, + + /* write */ + CEPH_OSD_OP_WRITE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 1, + CEPH_OSD_OP_WRITEFULL = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 2, + CEPH_OSD_OP_TRUNCATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 3, + CEPH_OSD_OP_ZERO = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 4, + CEPH_OSD_OP_DELETE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 5, + + /* fancy write */ + CEPH_OSD_OP_APPEND = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 6, + CEPH_OSD_OP_STARTSYNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 7, + CEPH_OSD_OP_SETTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 8, + CEPH_OSD_OP_TRIMTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 9, + + CEPH_OSD_OP_TMAPUP = CEPH_OSD_OP_MODE_RMW | CEPH_OSD_OP_TYPE_DATA | 10, + CEPH_OSD_OP_TMAPPUT = 
CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 11, + CEPH_OSD_OP_TMAPGET = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 12, + + CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13, + + /** attrs **/ + /* read */ + CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1, + CEPH_OSD_OP_GETXATTRS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 2, + + /* write */ + CEPH_OSD_OP_SETXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 1, + CEPH_OSD_OP_SETXATTRS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 2, + CEPH_OSD_OP_RESETXATTRS = CEPH_OSD_OP_MODE_WR|CEPH_OSD_OP_TYPE_ATTR | 3, + CEPH_OSD_OP_RMXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 4, + + /** subop **/ + CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1, + CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2, + CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3, + CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4, + CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5, + + /** lock **/ + CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1, + CEPH_OSD_OP_WRUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 2, + CEPH_OSD_OP_RDLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 3, + CEPH_OSD_OP_RDUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 4, + CEPH_OSD_OP_UPLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 5, + CEPH_OSD_OP_DNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 6, + + /** exec **/ + CEPH_OSD_OP_CALL = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_EXEC | 1, + + /** pg **/ + CEPH_OSD_OP_PGLS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 1, +}; + +static inline int ceph_osd_op_type_lock(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_LOCK; +} +static inline int ceph_osd_op_type_data(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_DATA; +} +static inline int ceph_osd_op_type_attr(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_ATTR; +} +static inline int ceph_osd_op_type_exec(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_EXEC; +} +static inline int ceph_osd_op_type_pg(int op) +{ + return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_PG; +} + +static inline int ceph_osd_op_mode_subop(int op) +{ + return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_SUB; +} +static inline int ceph_osd_op_mode_read(int op) +{ + return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_RD; +} +static inline int ceph_osd_op_mode_modify(int op) +{ + return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_WR; +} + +#define CEPH_OSD_TMAP_HDR 'h' +#define CEPH_OSD_TMAP_SET 's' +#define CEPH_OSD_TMAP_RM 'r' + +extern const char *ceph_osd_op_name(int op); + + +/* + * osd op flags + * + * An op may be READ, WRITE, or READ|WRITE. 
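+ *
+ * (A typical file write, for example, is sent with CEPH_OSD_FLAG_WRITE
+ * plus whichever ack semantics the caller wants, e.g. CEPH_OSD_FLAG_ACK
+ * and/or CEPH_OSD_FLAG_ONDISK.)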
+ */ +enum { + CEPH_OSD_FLAG_ACK = 1, /* want (or is) "ack" ack */ + CEPH_OSD_FLAG_ONNVRAM = 2, /* want (or is) "onnvram" ack */ + CEPH_OSD_FLAG_ONDISK = 4, /* want (or is) "ondisk" ack */ + CEPH_OSD_FLAG_RETRY = 8, /* resend attempt */ + CEPH_OSD_FLAG_READ = 16, /* op may read */ + CEPH_OSD_FLAG_WRITE = 32, /* op may write */ + CEPH_OSD_FLAG_ORDERSNAP = 64, /* EOLDSNAP if snapc is out of order */ + CEPH_OSD_FLAG_PEERSTAT = 128, /* msg includes osd_peer_stat */ + CEPH_OSD_FLAG_BALANCE_READS = 256, + CEPH_OSD_FLAG_PARALLELEXEC = 512, /* execute op in parallel */ + CEPH_OSD_FLAG_PGOP = 1024, /* pg op, no object */ +}; + +enum { + CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */ +}; + +#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/ +#define EBLACKLISTED ESHUTDOWN /* blacklisted */ + +/* + * an individual object operation. each may be accompanied by some data + * payload + */ +struct ceph_osd_op { + __le16 op; /* CEPH_OSD_OP_* */ + __le32 flags; /* CEPH_OSD_FLAG_* */ + union { + struct { + __le64 offset, length; + } __attribute__ ((packed)) extent; + struct { + __le32 name_len; + __le32 value_len; + } __attribute__ ((packed)) xattr; + struct { + __le64 truncate_size; + __le32 truncate_seq; + } __attribute__ ((packed)) trunc; + struct { + __u8 class_len; + __u8 method_len; + __u8 argc; + __le32 indata_len; + } __attribute__ ((packed)) cls; + struct { + __le64 cookie, count; + } __attribute__ ((packed)) pgls; + }; + __le32 payload_len; +} __attribute__ ((packed)); + +/* + * osd request message header. each request may include multiple + * ceph_osd_op object operations. + */ +struct ceph_osd_request_head { + __le64 tid; /* transaction id */ + __le32 client_inc; /* client incarnation */ + struct ceph_object_layout layout; /* pgid */ + __le32 osdmap_epoch; /* client's osdmap epoch */ + + __le32 flags; + + struct ceph_timespec mtime; /* for mutations only */ + struct ceph_eversion reassert_version; /* if we are replaying op */ + + __le32 object_len; /* length of object name */ + + __le64 snapid; /* snapid to read */ + __le64 snap_seq; /* writer's snap context */ + __le32 num_snaps; + + __le16 num_ops; + struct ceph_osd_op ops[]; /* followed by ops[], obj, ticket, snaps */ +} __attribute__ ((packed)); + +struct ceph_osd_reply_head { + __le64 tid; /* transaction id */ + __le32 client_inc; /* client incarnation */ + __le32 flags; + struct ceph_object_layout layout; + __le32 osdmap_epoch; + struct ceph_eversion reassert_version; /* for replaying uncommitted */ + + __le32 result; /* result code */ + + __le32 object_len; /* length of object name */ + __le32 num_ops; + struct ceph_osd_op ops[0]; /* ops[], object */ +} __attribute__ ((packed)); + + +#endif -- cgit v1.2.3 From de57606c23afded22202825b3db8a5d61859f198 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:07 -0700 Subject: ceph: client types We first define constants, types, and prototypes for the kernel client proper. A few subsystems are defined separately later: the MDS, OSD, and monitor clients, and the messaging layer. 
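As a rough illustration of how the pieces below fit together, the per-inode
recursive statistics and the mount option bits defined in super.h end up
being combined in code along these lines (an illustrative sketch only; the
getattr plumbing and the ceph_inode() container_of-style accessor are
assumed here, not added by this patch):

    /* sketch: report a directory's st_size as recursive bytes when the
     * 'rbytes' mount option is set, otherwise as its entry count */
    if (S_ISDIR(inode->i_mode)) {
            struct ceph_inode_info *ci = ceph_inode(inode);

            if (ceph_test_opt(ceph_client(inode->i_sb), RBYTES))
                    stat->size = ci->i_rbytes;
            else
                    stat->size = ci->i_files + ci->i_subdirs;
    }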
Signed-off-by: Sage Weil --- fs/ceph/ceph_debug.h | 37 +++ fs/ceph/ceph_frag.c | 21 ++ fs/ceph/ceph_frag.h | 109 +++++++ fs/ceph/ceph_ver.h | 6 + fs/ceph/super.h | 890 +++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/types.h | 28 ++ 6 files changed, 1091 insertions(+) create mode 100644 fs/ceph/ceph_debug.h create mode 100644 fs/ceph/ceph_frag.c create mode 100644 fs/ceph/ceph_frag.h create mode 100644 fs/ceph/ceph_ver.h create mode 100644 fs/ceph/super.h create mode 100644 fs/ceph/types.h diff --git a/fs/ceph/ceph_debug.h b/fs/ceph/ceph_debug.h new file mode 100644 index 000000000000..1818c2305610 --- /dev/null +++ b/fs/ceph/ceph_debug.h @@ -0,0 +1,37 @@ +#ifndef _FS_CEPH_DEBUG_H +#define _FS_CEPH_DEBUG_H + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#ifdef CONFIG_CEPH_FS_PRETTYDEBUG + +/* + * wrap pr_debug to include a filename:lineno prefix on each line. + * this incurs some overhead (kernel size and execution time) due to + * the extra function call at each call site. + */ + +# if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) +extern const char *ceph_file_part(const char *s, int len); +# define dout(fmt, ...) \ + pr_debug(" %12.12s:%-4d : " fmt, \ + ceph_file_part(__FILE__, sizeof(__FILE__)), \ + __LINE__, ##__VA_ARGS__) +# else +/* faux printk call just to see any compiler warnings. */ +# define dout(fmt, ...) do { \ + if (0) \ + printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ + } while (0) +# endif + +#else + +/* + * or, just wrap pr_debug + */ +# define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__) + +#endif + +#endif diff --git a/fs/ceph/ceph_frag.c b/fs/ceph/ceph_frag.c new file mode 100644 index 000000000000..ab6cf35c4091 --- /dev/null +++ b/fs/ceph/ceph_frag.c @@ -0,0 +1,21 @@ +/* + * Ceph 'frag' type + */ +#include "types.h" + +int ceph_frag_compare(__u32 a, __u32 b) +{ + unsigned va = ceph_frag_value(a); + unsigned vb = ceph_frag_value(b); + if (va < vb) + return -1; + if (va > vb) + return 1; + va = ceph_frag_bits(a); + vb = ceph_frag_bits(b); + if (va < vb) + return -1; + if (va > vb) + return 1; + return 0; +} diff --git a/fs/ceph/ceph_frag.h b/fs/ceph/ceph_frag.h new file mode 100644 index 000000000000..793f50cb7c22 --- /dev/null +++ b/fs/ceph/ceph_frag.h @@ -0,0 +1,109 @@ +#ifndef _FS_CEPH_FRAG_H +#define _FS_CEPH_FRAG_H + +/* + * "Frags" are a way to describe a subset of a 32-bit number space, + * using a mask and a value to match against that mask. Any given frag + * (subset of the number space) can be partitioned into 2^n sub-frags. + * + * Frags are encoded into a 32-bit word: + * 8 upper bits = "bits" + * 24 lower bits = "value" + * (We could go to 5+27 bits, but who cares.) + * + * We use the _most_ significant bits of the 24 bit value. This makes + * values logically sort. + * + * Unfortunately, because the "bits" field is still in the high bits, we + * can't sort encoded frags numerically. However, it does allow you + * to feed encoded frags as values into frag_contains_value. 
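+ *
+ * For example, ceph_frag_make(1, 0x800000) describes the upper half of
+ * the value space: bits=1, value=0x800000, encoded as 0x01800000, and
+ * ceph_frag_contains_value() matches exactly those values with bit 23
+ * set.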
+ */ +static inline __u32 ceph_frag_make(__u32 b, __u32 v) +{ + return (b << 24) | + (v & (0xffffffu << (24-b)) & 0xffffffu); +} +static inline __u32 ceph_frag_bits(__u32 f) +{ + return f >> 24; +} +static inline __u32 ceph_frag_value(__u32 f) +{ + return f & 0xffffffu; +} +static inline __u32 ceph_frag_mask(__u32 f) +{ + return (0xffffffu << (24-ceph_frag_bits(f))) & 0xffffffu; +} +static inline __u32 ceph_frag_mask_shift(__u32 f) +{ + return 24 - ceph_frag_bits(f); +} + +static inline int ceph_frag_contains_value(__u32 f, __u32 v) +{ + return (v & ceph_frag_mask(f)) == ceph_frag_value(f); +} +static inline int ceph_frag_contains_frag(__u32 f, __u32 sub) +{ + /* is sub as specific as us, and contained by us? */ + return ceph_frag_bits(sub) >= ceph_frag_bits(f) && + (ceph_frag_value(sub) & ceph_frag_mask(f)) == ceph_frag_value(f); +} + +static inline __u32 ceph_frag_parent(__u32 f) +{ + return ceph_frag_make(ceph_frag_bits(f) - 1, + ceph_frag_value(f) & (ceph_frag_mask(f) << 1)); +} +static inline int ceph_frag_is_left_child(__u32 f) +{ + return ceph_frag_bits(f) > 0 && + (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 0; +} +static inline int ceph_frag_is_right_child(__u32 f) +{ + return ceph_frag_bits(f) > 0 && + (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 1; +} +static inline __u32 ceph_frag_sibling(__u32 f) +{ + return ceph_frag_make(ceph_frag_bits(f), + ceph_frag_value(f) ^ (0x1000000 >> ceph_frag_bits(f))); +} +static inline __u32 ceph_frag_left_child(__u32 f) +{ + return ceph_frag_make(ceph_frag_bits(f)+1, ceph_frag_value(f)); +} +static inline __u32 ceph_frag_right_child(__u32 f) +{ + return ceph_frag_make(ceph_frag_bits(f)+1, + ceph_frag_value(f) | (0x1000000 >> (1+ceph_frag_bits(f)))); +} +static inline __u32 ceph_frag_make_child(__u32 f, int by, int i) +{ + int newbits = ceph_frag_bits(f) + by; + return ceph_frag_make(newbits, + ceph_frag_value(f) | (i << (24 - newbits))); +} +static inline int ceph_frag_is_leftmost(__u32 f) +{ + return ceph_frag_value(f) == 0; +} +static inline int ceph_frag_is_rightmost(__u32 f) +{ + return ceph_frag_value(f) == ceph_frag_mask(f); +} +static inline __u32 ceph_frag_next(__u32 f) +{ + return ceph_frag_make(ceph_frag_bits(f), + ceph_frag_value(f) + (0x1000000 >> ceph_frag_bits(f))); +} + +/* + * comparator to sort frags logically, as when traversing the + * number space in ascending order... + */ +int ceph_frag_compare(__u32 a, __u32 b); + +#endif diff --git a/fs/ceph/ceph_ver.h b/fs/ceph/ceph_ver.h new file mode 100644 index 000000000000..66c3727b671b --- /dev/null +++ b/fs/ceph/ceph_ver.h @@ -0,0 +1,6 @@ +#ifndef __CEPH_VERSION_H +#define __CEPH_VERSION_H + +#define CEPH_GIT_VER 335cd8f952b457095ea2a66aee3db50efb63c91d + +#endif diff --git a/fs/ceph/super.h b/fs/ceph/super.h new file mode 100644 index 000000000000..cfd39ef4023e --- /dev/null +++ b/fs/ceph/super.h @@ -0,0 +1,890 @@ +#ifndef _FS_CEPH_SUPER_H +#define _FS_CEPH_SUPER_H + +#include "ceph_debug.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "types.h" +#include "messenger.h" +#include "msgpool.h" +#include "mon_client.h" +#include "mds_client.h" +#include "osd_client.h" +#include "ceph_fs.h" + +/* f_type in struct statfs */ +#define CEPH_SUPER_MAGIC 0x00c36400 + +/* large granularity for statfs utilization stats to facilitate + * large volume sizes on 32-bit machines. 
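(with 1 MB blocks, a 32-bit block count can describe up to 2^52 bytes, i.e. 4 PB)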
*/ +#define CEPH_BLOCK_SHIFT 20 /* 1 MB */ +#define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT) + +/* + * mount options + */ +#define CEPH_OPT_FSID (1<<0) +#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ +#define CEPH_OPT_MYIP (1<<2) /* specified my ip */ +#define CEPH_OPT_DIRSTAT (1<<4) /* funky `cat dirname` for stats */ +#define CEPH_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */ +#define CEPH_OPT_NOCRC (1<<6) /* no data crc on writes */ +#define CEPH_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */ + +#define CEPH_OPT_DEFAULT (CEPH_OPT_RBYTES) + +#define ceph_set_opt(client, opt) \ + (client)->mount_args.flags |= CEPH_OPT_##opt; +#define ceph_test_opt(client, opt) \ + (!!((client)->mount_args.flags & CEPH_OPT_##opt)) + + +#define CEPH_MAX_MON_MOUNT_ADDR 5 + +struct ceph_mount_args { + int sb_flags; + int flags; + int mount_timeout; + int caps_wanted_delay_min, caps_wanted_delay_max; + struct ceph_fsid fsid; + struct ceph_entity_addr my_addr; + int wsize; + int rsize; /* max readahead */ + int max_readdir; /* max readdir size */ + int osd_timeout; + char *snapdir_name; /* default ".snap" */ + char *secret; + int cap_release_safety; +}; + +/* + * defaults + */ +#define CEPH_MOUNT_TIMEOUT_DEFAULT 60 +#define CEPH_MOUNT_RSIZE_DEFAULT (128*1024) /* readahead */ + +#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) +#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024) + +#define CEPH_SNAPDIRNAME_DEFAULT ".snap" + +/* + * Delay telling the MDS we no longer want caps, in case we reopen + * the file. Delay a minimum amount of time, even if we send a cap + * message for some other reason. Otherwise, take the oppotunity to + * update the mds to avoid sending another message later. + */ +#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */ +#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */ + + +/* mount state */ +enum { + CEPH_MOUNT_MOUNTING, + CEPH_MOUNT_MOUNTED, + CEPH_MOUNT_UNMOUNTING, + CEPH_MOUNT_UNMOUNTED, + CEPH_MOUNT_SHUTDOWN, +}; + +/* + * subtract jiffies + */ +static inline unsigned long time_sub(unsigned long a, unsigned long b) +{ + BUG_ON(time_after(b, a)); + return (long)a - (long)b; +} + +/* + * per-filesystem client state + * + * possibly shared by multiple mount points, if they are + * mounting the same ceph filesystem/cluster. + */ +struct ceph_client { + __s64 whoami; /* my client number */ + struct dentry *debugfs_monmap; + struct dentry *debugfs_mdsmap, *debugfs_osdmap; + struct dentry *debugfs_dir, *debugfs_dentry_lru, *debugfs_caps; + + struct mutex mount_mutex; /* serialize mount attempts */ + struct ceph_mount_args mount_args; + struct ceph_fsid fsid; + + struct super_block *sb; + + unsigned long mount_state; + wait_queue_head_t mount_wq; + + int mount_err; + void *signed_ticket; /* our keys to the kingdom */ + int signed_ticket_len; + + struct ceph_messenger *msgr; /* messenger instance */ + struct ceph_mon_client monc; + struct ceph_mds_client mdsc; + struct ceph_osd_client osdc; + + /* writeback */ + mempool_t *wb_pagevec_pool; + struct workqueue_struct *wb_wq; + struct workqueue_struct *pg_inv_wq; + struct workqueue_struct *trunc_wq; + + struct backing_dev_info backing_dev_info; +}; + +static inline struct ceph_client *ceph_client(struct super_block *sb) +{ + return sb->s_fs_info; +} + + +/* + * File i/o capability. This tracks shared state with the metadata + * server that allows us to cache or writeback attributes or to read + * and write data. 
For any given inode, we should have one or more + * capabilities, one issued by each metadata server, and our + * cumulative access is the OR of all issued capabilities. + * + * Each cap is referenced by the inode's i_caps rbtree and by per-mds + * session capability lists. + */ +struct ceph_cap { + struct ceph_inode_info *ci; + struct rb_node ci_node; /* per-ci cap tree */ + struct ceph_mds_session *session; + struct list_head session_caps; /* per-session caplist */ + int mds; + u64 cap_id; /* unique cap id (mds provided) */ + int issued; /* latest, from the mds */ + int implemented; /* implemented superset of issued (for revocation) */ + int mds_wanted; + u32 seq, issue_seq, mseq, gen; + unsigned long last_used; + struct list_head caps_item; +}; + +#define CHECK_CAPS_NODELAY 1 /* do not delay any further */ +#define CHECK_CAPS_AUTHONLY 2 /* only check auth cap */ +#define CHECK_CAPS_FLUSH 4 /* flush any dirty caps */ + +/* + * Snapped cap state that is pending flush to mds. When a snapshot occurs, + * we first complete any in-process sync writes and writeback any dirty + * data before flushing the snapped state (tracked here) back to the MDS. + */ +struct ceph_cap_snap { + atomic_t nref; + struct ceph_inode_info *ci; + struct list_head ci_item, flushing_item; + + u64 follows, flush_tid; + int issued, dirty; + struct ceph_snap_context *context; + + mode_t mode; + uid_t uid; + gid_t gid; + + void *xattr_blob; + int xattr_len; + u64 xattr_version; + + u64 size; + struct timespec mtime, atime, ctime; + u64 time_warp_seq; + int writing; /* a sync write is still in progress */ + int dirty_pages; /* dirty pages awaiting writeback */ +}; + +static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap) +{ + if (atomic_dec_and_test(&capsnap->nref)) + kfree(capsnap); +} + +/* + * The frag tree describes how a directory is fragmented, potentially across + * multiple metadata servers. It is also used to indicate points where + * metadata authority is delegated, and whether/where metadata is replicated. + * + * A _leaf_ frag will be present in the i_fragtree IFF there is + * delegation info. That is, if mds >= 0 || ndist > 0. + */ +#define CEPH_MAX_DIRFRAG_REP 4 + +struct ceph_inode_frag { + struct rb_node node; + + /* fragtree state */ + u32 frag; + int split_by; /* i.e. 2^(split_by) children */ + + /* delegation and replication info */ + int mds; /* -1 if same authority as parent */ + int ndist; /* >0 if replicated */ + int dist[CEPH_MAX_DIRFRAG_REP]; +}; + +/* + * We cache inode xattrs as an encoded blob until they are first used, + * at which point we parse them into an rbtree. + */ +struct ceph_inode_xattr { + struct rb_node node; + + const char *name; + int name_len; + const char *val; + int val_len; + int dirty; + + int should_free_name; + int should_free_val; +}; + +struct ceph_inode_xattrs_info { + /* + * (still encoded) xattr blob. we avoid the overhead of parsing + * this until someone actually calls getxattr, etc. + * + * blob->vec.iov_len == 4 implies there are no xattrs; blob == + * NULL means we don't know. + */ + struct ceph_buffer *blob, *prealloc_blob; + + struct rb_root index; + bool dirty; + int count; + int names_size; + int vals_size; + u64 version, index_version; +}; + +/* + * Ceph inode. 
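+ *
+ * ceph_inode_info wraps the VFS inode (embedded at the end of the struct
+ * so ceph_inode() can map back to it) and adds capability state, directory
+ * fragment and recursive accounting data, and cached xattrs.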
+ */ +#define CEPH_I_COMPLETE 1 /* we have complete directory cached */ +#define CEPH_I_NODELAY 4 /* do not delay cap release */ +#define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */ + +struct ceph_inode_info { + struct ceph_vino i_vino; /* ceph ino + snap */ + + u64 i_version; + u32 i_time_warp_seq; + + unsigned i_ceph_flags; + unsigned long i_release_count; + + struct ceph_file_layout i_layout; + char *i_symlink; + + /* for dirs */ + struct timespec i_rctime; + u64 i_rbytes, i_rfiles, i_rsubdirs; + u64 i_files, i_subdirs; + u64 i_max_offset; /* largest readdir offset, set with I_COMPLETE */ + + struct rb_root i_fragtree; + struct mutex i_fragtree_mutex; + + struct ceph_inode_xattrs_info i_xattrs; + + /* capabilities. protected _both_ by i_lock and cap->session's + * s_mutex. */ + struct rb_root i_caps; /* cap list */ + struct ceph_cap *i_auth_cap; /* authoritative cap, if any */ + unsigned i_dirty_caps, i_flushing_caps; /* mask of dirtied fields */ + struct list_head i_dirty_item, i_flushing_item; + u64 i_cap_flush_seq; + /* we need to track cap writeback on a per-cap-bit basis, to allow + * overlapping, pipelined cap flushes to the mds. we can probably + * reduce the tid to 8 bits if we're concerned about inode size. */ + u16 i_cap_flush_last_tid, i_cap_flush_tid[CEPH_CAP_BITS]; + wait_queue_head_t i_cap_wq; /* threads waiting on a capability */ + unsigned long i_hold_caps_min; /* jiffies */ + unsigned long i_hold_caps_max; /* jiffies */ + struct list_head i_cap_delay_list; /* for delayed cap release to mds */ + int i_cap_exporting_mds; /* to handle cap migration between */ + unsigned i_cap_exporting_mseq; /* mds's. */ + unsigned i_cap_exporting_issued; + struct ceph_cap_reservation i_cap_migration_resv; + struct list_head i_cap_snaps; /* snapped state pending flush to mds */ + struct ceph_snap_context *i_head_snapc; /* set if wr_buffer_head > 0 */ + unsigned i_snap_caps; /* cap bits for snapped files */ + + int i_nr_by_mode[CEPH_FILE_MODE_NUM]; /* open file counts */ + + u32 i_truncate_seq; /* last truncate to smaller size */ + u64 i_truncate_size; /* and the size we last truncated down to */ + int i_truncate_pending; /* still need to call vmtruncate */ + + u64 i_max_size; /* max file size authorized by mds */ + u64 i_reported_size; /* (max_)size reported to or requested of mds */ + u64 i_wanted_max_size; /* offset we'd like to write too */ + u64 i_requested_max_size; /* max_size we've requested */ + + /* held references to caps */ + int i_pin_ref; + int i_rd_ref, i_rdcache_ref, i_wr_ref; + int i_wrbuffer_ref, i_wrbuffer_ref_head; + u32 i_shared_gen; /* increment each time we get FILE_SHARED */ + u32 i_rdcache_gen; /* we increment this each time we get + FILE_CACHE. If it's non-zero, we + _may_ have cached pages. 
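+					   i_rdcache_revoking (below) records
+					   which generation a revoke applies
+					   to, so pages can be invalidated
+					   asynchronously.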
*/ + u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */ + + struct list_head i_unsafe_writes; /* uncommitted sync writes */ + struct list_head i_unsafe_dirops; /* uncommitted mds dir ops */ + spinlock_t i_unsafe_lock; + + struct ceph_snap_realm *i_snap_realm; /* snap realm (if caps) */ + int i_snap_realm_counter; /* snap realm (if caps) */ + struct list_head i_snap_realm_item; + struct list_head i_snap_flush_item; + + struct work_struct i_wb_work; /* writeback work */ + struct work_struct i_pg_inv_work; /* page invalidation work */ + + struct work_struct i_vmtruncate_work; + + struct inode vfs_inode; /* at end */ +}; + +static inline struct ceph_inode_info *ceph_inode(struct inode *inode) +{ + return list_entry(inode, struct ceph_inode_info, vfs_inode); +} + +static inline void ceph_i_clear(struct inode *inode, unsigned mask) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + + spin_lock(&inode->i_lock); + ci->i_ceph_flags &= ~mask; + spin_unlock(&inode->i_lock); +} + +static inline void ceph_i_set(struct inode *inode, unsigned mask) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + + spin_lock(&inode->i_lock); + ci->i_ceph_flags |= mask; + spin_unlock(&inode->i_lock); +} + +static inline bool ceph_i_test(struct inode *inode, unsigned mask) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + bool r; + + smp_mb(); + r = (ci->i_ceph_flags & mask) == mask; + return r; +} + + +/* find a specific frag @f */ +extern struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, + u32 f); + +/* + * choose fragment for value @v. copy frag content to pfrag, if leaf + * exists + */ +extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v, + struct ceph_inode_frag *pfrag, + int *found); + +/* + * Ceph dentry state + */ +struct ceph_dentry_info { + struct ceph_mds_session *lease_session; + u32 lease_gen, lease_shared_gen; + u32 lease_seq; + unsigned long lease_renew_after, lease_renew_from; + struct list_head lru; + struct dentry *dentry; + u64 time; + u64 offset; +}; + +static inline struct ceph_dentry_info *ceph_dentry(struct dentry *dentry) +{ + return (struct ceph_dentry_info *)dentry->d_fsdata; +} + +static inline loff_t ceph_make_fpos(unsigned frag, unsigned off) +{ + return ((loff_t)frag << 32) | (loff_t)off; +} + +/* + * ino_t is <64 bits on many architectures, blech. + * + * don't include snap in ino hash, at least for now. 
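+ *
+ * On 32-bit hosts the high and low 32-bit halves of the ino are XORed
+ * together (e.g. 0x100000002 folds to 3), and a resulting 0 is bumped
+ * to 1.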
+ */ +static inline ino_t ceph_vino_to_ino(struct ceph_vino vino) +{ + ino_t ino = (ino_t)vino.ino; /* ^ (vino.snap << 20); */ +#if BITS_PER_LONG == 32 + ino ^= vino.ino >> (sizeof(u64)-sizeof(ino_t)) * 8; + if (!ino) + ino = 1; +#endif + return ino; +} + +static inline int ceph_set_ino_cb(struct inode *inode, void *data) +{ + ceph_inode(inode)->i_vino = *(struct ceph_vino *)data; + inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data); + return 0; +} + +static inline struct ceph_vino ceph_vino(struct inode *inode) +{ + return ceph_inode(inode)->i_vino; +} + +/* for printf-style formatting */ +#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap + +static inline u64 ceph_ino(struct inode *inode) +{ + return ceph_inode(inode)->i_vino.ino; +} +static inline u64 ceph_snap(struct inode *inode) +{ + return ceph_inode(inode)->i_vino.snap; +} + +static inline int ceph_ino_compare(struct inode *inode, void *data) +{ + struct ceph_vino *pvino = (struct ceph_vino *)data; + struct ceph_inode_info *ci = ceph_inode(inode); + return ci->i_vino.ino == pvino->ino && + ci->i_vino.snap == pvino->snap; +} + +static inline struct inode *ceph_find_inode(struct super_block *sb, + struct ceph_vino vino) +{ + ino_t t = ceph_vino_to_ino(vino); + return ilookup5(sb, t, ceph_ino_compare, &vino); +} + + +/* + * caps helpers + */ +static inline bool __ceph_is_any_real_caps(struct ceph_inode_info *ci) +{ + return !RB_EMPTY_ROOT(&ci->i_caps); +} + +extern int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented); +extern int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int t); +extern int __ceph_caps_issued_other(struct ceph_inode_info *ci, + struct ceph_cap *cap); + +static inline int ceph_caps_issued(struct ceph_inode_info *ci) +{ + int issued; + spin_lock(&ci->vfs_inode.i_lock); + issued = __ceph_caps_issued(ci, NULL); + spin_unlock(&ci->vfs_inode.i_lock); + return issued; +} + +static inline int ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, + int touch) +{ + int r; + spin_lock(&ci->vfs_inode.i_lock); + r = __ceph_caps_issued_mask(ci, mask, touch); + spin_unlock(&ci->vfs_inode.i_lock); + return r; +} + +static inline int __ceph_caps_dirty(struct ceph_inode_info *ci) +{ + return ci->i_dirty_caps | ci->i_flushing_caps; +} +extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask); + +extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask); +extern int __ceph_caps_used(struct ceph_inode_info *ci); + +extern int __ceph_caps_file_wanted(struct ceph_inode_info *ci); + +/* + * wanted, by virtue of open file modes AND cap refs (buffered/cached data) + */ +static inline int __ceph_caps_wanted(struct ceph_inode_info *ci) +{ + int w = __ceph_caps_file_wanted(ci) | __ceph_caps_used(ci); + if (w & CEPH_CAP_FILE_BUFFER) + w |= CEPH_CAP_FILE_EXCL; /* we want EXCL if dirty data */ + return w; +} + +/* what the mds thinks we want */ +extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci); + +extern void ceph_caps_init(void); +extern void ceph_caps_finalize(void); +extern int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need); +extern int ceph_unreserve_caps(struct ceph_cap_reservation *ctx); +extern void ceph_reservation_status(struct ceph_client *client, + int *total, int *avail, int *used, + int *reserved); + +static inline struct ceph_client *ceph_inode_to_client(struct inode *inode) +{ + return (struct ceph_client *)inode->i_sb->s_fs_info; +} + +static inline struct ceph_client *ceph_sb_to_client(struct super_block 
*sb) +{ + return (struct ceph_client *)sb->s_fs_info; +} + +static inline int ceph_queue_writeback(struct inode *inode) +{ + return queue_work(ceph_inode_to_client(inode)->wb_wq, + &ceph_inode(inode)->i_wb_work); +} + +static inline int ceph_queue_page_invalidation(struct inode *inode) +{ + return queue_work(ceph_inode_to_client(inode)->pg_inv_wq, + &ceph_inode(inode)->i_pg_inv_work); +} + + +/* + * we keep buffered readdir results attached to file->private_data + */ +struct ceph_file_info { + int fmode; /* initialized on open */ + + /* readdir: position within the dir */ + u32 frag; + struct ceph_mds_request *last_readdir; + int at_end; + + /* readdir: position within a frag */ + unsigned offset; /* offset of last chunk, adjusted for . and .. */ + u64 next_offset; /* offset of next chunk (last_name's + 1) */ + char *last_name; /* last entry in previous chunk */ + struct dentry *dentry; /* next dentry (for dcache readdir) */ + unsigned long dir_release_count; + + /* used for -o dirstat read() on directory thing */ + char *dir_info; + int dir_info_len; +}; + + + +/* + * snapshots + */ + +/* + * A "snap context" is the set of existing snapshots when we + * write data. It is used by the OSD to guide its COW behavior. + * + * The ceph_snap_context is refcounted, and attached to each dirty + * page, indicating which context the dirty data belonged when it was + * dirtied. + */ +struct ceph_snap_context { + atomic_t nref; + u64 seq; + int num_snaps; + u64 snaps[]; +}; + +static inline struct ceph_snap_context * +ceph_get_snap_context(struct ceph_snap_context *sc) +{ + /* + printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), + atomic_read(&sc->nref)+1); + */ + if (sc) + atomic_inc(&sc->nref); + return sc; +} + +static inline void ceph_put_snap_context(struct ceph_snap_context *sc) +{ + if (!sc) + return; + /* + printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), + atomic_read(&sc->nref)-1); + */ + if (atomic_dec_and_test(&sc->nref)) { + /*printk(" deleting snap_context %p\n", sc);*/ + kfree(sc); + } +} + +/* + * A "snap realm" describes a subset of the file hierarchy sharing + * the same set of snapshots that apply to it. The realms themselves + * are organized into a hierarchy, such that children inherit (some of) + * the snapshots of their parents. + * + * All inodes within the realm that have capabilities are linked into a + * per-realm list. + */ +struct ceph_snap_realm { + u64 ino; + atomic_t nref; + u64 created, seq; + u64 parent_ino; + u64 parent_since; /* snapid when our current parent became so */ + + u64 *prior_parent_snaps; /* snaps inherited from any parents we */ + int num_prior_parent_snaps; /* had prior to parent_since */ + u64 *snaps; /* snaps specific to this realm */ + int num_snaps; + + struct ceph_snap_realm *parent; + struct list_head children; /* list of child realms */ + struct list_head child_item; + + struct list_head empty_item; /* if i have ref==0 */ + + /* the current set of snaps for this realm */ + struct ceph_snap_context *cached_context; + + struct list_head inodes_with_caps; + spinlock_t inodes_with_caps_lock; +}; + + + +/* + * calculate the number of pages a given length and offset map onto, + * if we align the data. 
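+ *
+ * For example, with 4 KB pages, off 4094 and len 8 touch the last two
+ * bytes of one page and the first six of the next, so this returns 2;
+ * off 0 and len 8192 also return 2.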
+ */ +static inline int calc_pages_for(u64 off, u64 len) +{ + return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) - + (off >> PAGE_CACHE_SHIFT); +} + + + +/* snap.c */ +struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc, + u64 ino); +extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc, + struct ceph_snap_realm *realm); +extern void ceph_put_snap_realm(struct ceph_mds_client *mdsc, + struct ceph_snap_realm *realm); +extern int ceph_update_snap_trace(struct ceph_mds_client *m, + void *p, void *e, bool deletion); +extern void ceph_handle_snap(struct ceph_mds_client *mdsc, + struct ceph_msg *msg); +extern void ceph_queue_cap_snap(struct ceph_inode_info *ci, + struct ceph_snap_context *snapc); +extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci, + struct ceph_cap_snap *capsnap); +extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc); + +/* + * a cap_snap is "pending" if it is still awaiting an in-progress + * sync write (that may/may not still update size, mtime, etc.). + */ +static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci) +{ + return !list_empty(&ci->i_cap_snaps) && + list_entry(ci->i_cap_snaps.prev, struct ceph_cap_snap, + ci_item)->writing; +} + + +/* super.c */ +extern struct kmem_cache *ceph_inode_cachep; +extern struct kmem_cache *ceph_cap_cachep; +extern struct kmem_cache *ceph_dentry_cachep; +extern struct kmem_cache *ceph_file_cachep; + +extern const char *ceph_msg_type_name(int type); + +#define FSID_FORMAT "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-" \ + "%02x%02x%02x%02x%02x%02x" +#define PR_FSID(f) (f)->fsid[0], (f)->fsid[1], (f)->fsid[2], (f)->fsid[3], \ + (f)->fsid[4], (f)->fsid[5], (f)->fsid[6], (f)->fsid[7], \ + (f)->fsid[8], (f)->fsid[9], (f)->fsid[10], (f)->fsid[11], \ + (f)->fsid[12], (f)->fsid[13], (f)->fsid[14], (f)->fsid[15] + +/* inode.c */ +extern const struct inode_operations ceph_file_iops; + +extern struct inode *ceph_alloc_inode(struct super_block *sb); +extern void ceph_destroy_inode(struct inode *inode); + +extern struct inode *ceph_get_inode(struct super_block *sb, + struct ceph_vino vino); +extern struct inode *ceph_get_snapdir(struct inode *parent); +extern int ceph_fill_file_size(struct inode *inode, int issued, + u32 truncate_seq, u64 truncate_size, u64 size); +extern void ceph_fill_file_time(struct inode *inode, int issued, + u64 time_warp_seq, struct timespec *ctime, + struct timespec *mtime, struct timespec *atime); +extern int ceph_fill_trace(struct super_block *sb, + struct ceph_mds_request *req, + struct ceph_mds_session *session); +extern int ceph_readdir_prepopulate(struct ceph_mds_request *req, + struct ceph_mds_session *session); + +extern int ceph_inode_holds_cap(struct inode *inode, int mask); + +extern int ceph_inode_set_size(struct inode *inode, loff_t size); +extern void ceph_inode_writeback(struct work_struct *work); +extern void ceph_vmtruncate_work(struct work_struct *work); +extern void __ceph_do_pending_vmtruncate(struct inode *inode); +extern void __ceph_queue_vmtruncate(struct inode *inode); + +extern int ceph_do_getattr(struct inode *inode, int mask); +extern int ceph_permission(struct inode *inode, int mask); +extern int ceph_setattr(struct dentry *dentry, struct iattr *attr); +extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat); + +/* xattr.c */ +extern int ceph_setxattr(struct dentry *, const char *, const void *, + size_t, int); +extern ssize_t ceph_getxattr(struct dentry *, const char *, void *, 
size_t); +extern ssize_t ceph_listxattr(struct dentry *, char *, size_t); +extern int ceph_removexattr(struct dentry *, const char *); +extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci); +extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci); + +/* caps.c */ +extern const char *ceph_cap_string(int c); +extern void ceph_handle_caps(struct ceph_mds_session *session, + struct ceph_msg *msg); +extern int ceph_add_cap(struct inode *inode, + struct ceph_mds_session *session, u64 cap_id, + int fmode, unsigned issued, unsigned wanted, + unsigned cap, unsigned seq, u64 realmino, int flags, + struct ceph_cap_reservation *caps_reservation); +extern void __ceph_remove_cap(struct ceph_cap *cap, + struct ceph_cap_reservation *ctx); +static inline void ceph_remove_cap(struct ceph_cap *cap) +{ + struct inode *inode = &cap->ci->vfs_inode; + spin_lock(&inode->i_lock); + __ceph_remove_cap(cap, NULL); + spin_unlock(&inode->i_lock); +} + +extern void ceph_queue_caps_release(struct inode *inode); +extern int ceph_write_inode(struct inode *inode, int unused); +extern int ceph_fsync(struct file *file, struct dentry *dentry, int datasync); +extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session); +extern int ceph_get_cap_mds(struct inode *inode); +extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps); +extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had); +extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, + struct ceph_snap_context *snapc); +extern void __ceph_flush_snaps(struct ceph_inode_info *ci, + struct ceph_mds_session **psession); +extern void ceph_check_caps(struct ceph_inode_info *ci, int flags, + struct ceph_mds_session *session); +extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc, + int flushdirty); + +extern int ceph_encode_inode_release(void **p, struct inode *inode, + int mds, int drop, int unless, int force); +extern int ceph_encode_dentry_release(void **p, struct dentry *dn, + int mds, int drop, int unless); + +extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, + int *got, loff_t endoff); + +/* for counting open files by mode */ +static inline void __ceph_get_fmode(struct ceph_inode_info *ci, int mode) +{ + ci->i_nr_by_mode[mode]++; +} +extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode); + +/* addr.c */ +extern const struct address_space_operations ceph_aops; +extern int ceph_mmap(struct file *file, struct vm_area_struct *vma); + +/* file.c */ +extern const struct file_operations ceph_file_fops; +extern const struct address_space_operations ceph_aops; +extern int ceph_open(struct inode *inode, struct file *file); +extern struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry, + struct nameidata *nd, int mode, + int locked_dir); +extern int ceph_release(struct inode *inode, struct file *filp); +extern void ceph_release_page_vector(struct page **pages, int num_pages); + +/* dir.c */ +extern const struct file_operations ceph_dir_fops; +extern const struct inode_operations ceph_dir_iops; +extern struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops, + ceph_snapdir_dentry_ops; + +extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry); +extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, + struct dentry *dentry, int err); + +extern void ceph_dentry_lru_add(struct dentry *dn); +extern void ceph_dentry_lru_touch(struct dentry *dn); +extern void 
ceph_dentry_lru_del(struct dentry *dn); + +/* + * our d_ops vary depending on whether the inode is live, + * snapshotted (read-only), or a virtual ".snap" directory. + */ +int ceph_init_dentry(struct dentry *dentry); + + +/* ioctl.c */ +extern long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg); + +/* export.c */ +extern const struct export_operations ceph_export_ops; + +/* debugfs.c */ +extern int ceph_debugfs_init(void); +extern void ceph_debugfs_cleanup(void); +extern int ceph_debugfs_client_init(struct ceph_client *client); +extern void ceph_debugfs_client_cleanup(struct ceph_client *client); + +static inline struct inode *get_dentry_parent_inode(struct dentry *dentry) +{ + if (dentry && dentry->d_parent) + return dentry->d_parent->d_inode; + + return NULL; +} + +#endif /* _FS_CEPH_SUPER_H */ diff --git a/fs/ceph/types.h b/fs/ceph/types.h new file mode 100644 index 000000000000..8a514568cab2 --- /dev/null +++ b/fs/ceph/types.h @@ -0,0 +1,28 @@ +#ifndef _FS_CEPH_TYPES_H +#define _FS_CEPH_TYPES_H + +/* needed before including ceph_fs.h */ +#include +#include +#include +#include + +#include "ceph_fs.h" +#include "ceph_frag.h" + +/* + * Identify inodes by both their ino AND snapshot id (a u64). + */ +struct ceph_vino { + u64 ino; + u64 snap; +}; + + +/* context for the caps reservation mechanism */ +struct ceph_cap_reservation { + int count; +}; + + +#endif -- cgit v1.2.3 From c30dbb9cc7fc75ab1d0ee6fb084ba4684f7a665d Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:07 -0700 Subject: ceph: ref counted buffer struct ceph_buffer is a simple ref-counted buffer. We transparently choose between kmalloc for small buffers and vmalloc for large ones. This is currently used only for allocating memory for xattr data. Signed-off-by: Sage Weil --- fs/ceph/buffer.c | 34 ++++++++++++++++++++++++++++++++++ fs/ceph/buffer.h | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+) create mode 100644 fs/ceph/buffer.c create mode 100644 fs/ceph/buffer.h diff --git a/fs/ceph/buffer.c b/fs/ceph/buffer.c new file mode 100644 index 000000000000..cf9aaccef22b --- /dev/null +++ b/fs/ceph/buffer.c @@ -0,0 +1,34 @@ + +#include "ceph_debug.h" +#include "buffer.h" + +struct ceph_buffer *ceph_buffer_new(gfp_t gfp) +{ + struct ceph_buffer *b; + + b = kmalloc(sizeof(*b), gfp); + if (!b) + return NULL; + atomic_set(&b->nref, 1); + b->vec.iov_base = NULL; + b->vec.iov_len = 0; + b->alloc_len = 0; + return b; +} + +int ceph_buffer_alloc(struct ceph_buffer *b, int len, gfp_t gfp) +{ + b->vec.iov_base = kmalloc(len, gfp | __GFP_NOWARN); + if (b->vec.iov_base) { + b->is_vmalloc = false; + } else { + b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL); + b->is_vmalloc = true; + } + if (!b->vec.iov_base) + return -ENOMEM; + b->alloc_len = len; + b->vec.iov_len = len; + return 0; +} + diff --git a/fs/ceph/buffer.h b/fs/ceph/buffer.h new file mode 100644 index 000000000000..16b1930acc45 --- /dev/null +++ b/fs/ceph/buffer.h @@ -0,0 +1,55 @@ +#ifndef __FS_CEPH_BUFFER_H +#define __FS_CEPH_BUFFER_H + +#include +#include +#include +#include + +/* + * a simple reference counted buffer. + * + * use kmalloc for small sizes (<= one page), vmalloc for larger + * sizes. 
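+ *
+ * A typical caller (illustrative sketch only; "len" and "data" stand in
+ * for whatever the caller has) allocates, fills, and drops its reference:
+ *
+ *	struct ceph_buffer *b = ceph_buffer_new_alloc(len, GFP_NOFS);
+ *	if (!b)
+ *		return -ENOMEM;
+ *	memcpy(b->vec.iov_base, data, len);
+ *	...
+ *	ceph_buffer_put(b);
+ *
+ * The final put frees both the data and the struct once nref drops to 0.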
+ */ +struct ceph_buffer { + atomic_t nref; + struct kvec vec; + size_t alloc_len; + bool is_vmalloc; +}; + +struct ceph_buffer *ceph_buffer_new(gfp_t gfp); +int ceph_buffer_alloc(struct ceph_buffer *b, int len, gfp_t gfp); + +static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b) +{ + atomic_inc(&b->nref); + return b; +} + +static inline void ceph_buffer_put(struct ceph_buffer *b) +{ + if (b && atomic_dec_and_test(&b->nref)) { + if (b->vec.iov_base) { + if (b->is_vmalloc) + vfree(b->vec.iov_base); + else + kfree(b->vec.iov_base); + } + kfree(b); + } +} + +static inline struct ceph_buffer *ceph_buffer_new_alloc(int len, gfp_t gfp) +{ + struct ceph_buffer *b = ceph_buffer_new(gfp); + + if (b && ceph_buffer_alloc(b, len, gfp) < 0) { + ceph_buffer_put(b); + b = NULL; + } + return b; +} + +#endif -- cgit v1.2.3 From 16725b9d2a2e3d0fd2b0034482e2eb0a2d78050f Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:07 -0700 Subject: ceph: super.c Mount option parsing, client setup and teardown, and a few odds and ends (e.g., statfs). Signed-off-by: Sage Weil --- fs/ceph/super.c | 936 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 936 insertions(+) create mode 100644 fs/ceph/super.c diff --git a/fs/ceph/super.c b/fs/ceph/super.c new file mode 100644 index 000000000000..0723fb6e96a1 --- /dev/null +++ b/fs/ceph/super.c @@ -0,0 +1,936 @@ + +#include "ceph_debug.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ceph_ver.h" +#include "decode.h" +#include "super.h" +#include "mon_client.h" + +/* + * Ceph superblock operations + * + * Handle the basics of mounting, unmounting. + */ + + +/* + * find filename portion of a path (/foo/bar/baz -> baz) + */ +const char *ceph_file_part(const char *s, int len) +{ + const char *e = s + len; + + while (e != s && *(e-1) != '/') + e--; + return e; +} + + +/* + * super ops + */ +static void ceph_put_super(struct super_block *s) +{ + struct ceph_client *cl = ceph_client(s); + + dout("put_super\n"); + ceph_mdsc_close_sessions(&cl->mdsc); + return; +} + +static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) +{ + struct ceph_client *client = ceph_inode_to_client(dentry->d_inode); + struct ceph_monmap *monmap = client->monc.monmap; + struct ceph_statfs st; + u64 fsid; + int err; + + dout("statfs\n"); + err = ceph_monc_do_statfs(&client->monc, &st); + if (err < 0) + return err; + + /* fill in kstatfs */ + buf->f_type = CEPH_SUPER_MAGIC; /* ?? */ + + /* + * express utilization in terms of large blocks to avoid + * overflow on 32-bit machines. 
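+	 * The monitor reports kb/kb_used/kb_avail in kilobytes; shifting
+	 * right by CEPH_BLOCK_SHIFT-10 converts those to 1 MB blocks
+	 * (e.g. a 3 TB cluster reports ~3*2^30 KB, i.e. ~3*2^20 blocks).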
+ */ + buf->f_bsize = 1 << CEPH_BLOCK_SHIFT; + buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10); + buf->f_bfree = (le64_to_cpu(st.kb) - le64_to_cpu(st.kb_used)) >> + (CEPH_BLOCK_SHIFT-10); + buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10); + + buf->f_files = le64_to_cpu(st.num_objects); + buf->f_ffree = -1; + buf->f_namelen = PATH_MAX; + buf->f_frsize = PAGE_CACHE_SIZE; + + /* leave fsid little-endian, regardless of host endianness */ + fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1); + buf->f_fsid.val[0] = fsid & 0xffffffff; + buf->f_fsid.val[1] = fsid >> 32; + + return 0; +} + + +static int ceph_syncfs(struct super_block *sb, int wait) +{ + dout("sync_fs %d\n", wait); + ceph_osdc_sync(&ceph_client(sb)->osdc); + ceph_mdsc_sync(&ceph_client(sb)->mdsc); + return 0; +} + + +/** + * ceph_show_options - Show mount options in /proc/mounts + * @m: seq_file to write to + * @mnt: mount descriptor + */ +static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt) +{ + struct ceph_client *client = ceph_sb_to_client(mnt->mnt_sb); + struct ceph_mount_args *args = &client->mount_args; + + if (args->flags & CEPH_OPT_FSID) + seq_printf(m, ",fsidmajor=%llu,fsidminor%llu", + le64_to_cpu(*(__le64 *)&args->fsid.fsid[0]), + le64_to_cpu(*(__le64 *)&args->fsid.fsid[8])); + if (args->flags & CEPH_OPT_NOSHARE) + seq_puts(m, ",noshare"); + if (args->flags & CEPH_OPT_DIRSTAT) + seq_puts(m, ",dirstat"); + if ((args->flags & CEPH_OPT_RBYTES) == 0) + seq_puts(m, ",norbytes"); + if (args->flags & CEPH_OPT_NOCRC) + seq_puts(m, ",nocrc"); + if (args->flags & CEPH_OPT_NOASYNCREADDIR) + seq_puts(m, ",noasyncreaddir"); + if (strcmp(args->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT)) + seq_printf(m, ",snapdirname=%s", args->snapdir_name); + if (args->secret) + seq_puts(m, ",secret="); + return 0; +} + +/* + * caches + */ +struct kmem_cache *ceph_inode_cachep; +struct kmem_cache *ceph_cap_cachep; +struct kmem_cache *ceph_dentry_cachep; +struct kmem_cache *ceph_file_cachep; + +static void ceph_inode_init_once(void *foo) +{ + struct ceph_inode_info *ci = foo; + inode_init_once(&ci->vfs_inode); +} + +static int __init init_caches(void) +{ + ceph_inode_cachep = kmem_cache_create("ceph_inode_info", + sizeof(struct ceph_inode_info), + __alignof__(struct ceph_inode_info), + (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD), + ceph_inode_init_once); + if (ceph_inode_cachep == NULL) + return -ENOMEM; + + ceph_cap_cachep = KMEM_CACHE(ceph_cap, + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); + if (ceph_cap_cachep == NULL) + goto bad_cap; + + ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info, + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); + if (ceph_dentry_cachep == NULL) + goto bad_dentry; + + ceph_file_cachep = KMEM_CACHE(ceph_file_info, + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); + if (ceph_file_cachep == NULL) + goto bad_file; + + return 0; + +bad_file: + kmem_cache_destroy(ceph_dentry_cachep); +bad_dentry: + kmem_cache_destroy(ceph_cap_cachep); +bad_cap: + kmem_cache_destroy(ceph_inode_cachep); + return -ENOMEM; +} + +static void destroy_caches(void) +{ + kmem_cache_destroy(ceph_inode_cachep); + kmem_cache_destroy(ceph_cap_cachep); + kmem_cache_destroy(ceph_dentry_cachep); + kmem_cache_destroy(ceph_file_cachep); +} + + +/* + * ceph_umount_begin - initiate forced umount. Tear down down the + * mount, skipping steps that may hang while waiting for server(s). 
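+ * All we do here is flag the client CEPH_MOUNT_SHUTDOWN; code that would
+ * otherwise block on the cluster is expected to notice the state change
+ * and give up.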
+ */ +static void ceph_umount_begin(struct super_block *sb) +{ + struct ceph_client *client = ceph_sb_to_client(sb); + + dout("ceph_umount_begin - starting forced umount\n"); + if (!client) + return; + client->mount_state = CEPH_MOUNT_SHUTDOWN; + return; +} + +static const struct super_operations ceph_super_ops = { + .alloc_inode = ceph_alloc_inode, + .destroy_inode = ceph_destroy_inode, + .write_inode = ceph_write_inode, + .sync_fs = ceph_syncfs, + .put_super = ceph_put_super, + .show_options = ceph_show_options, + .statfs = ceph_statfs, + .umount_begin = ceph_umount_begin, +}; + + +const char *ceph_msg_type_name(int type) +{ + switch (type) { + case CEPH_MSG_SHUTDOWN: return "shutdown"; + case CEPH_MSG_PING: return "ping"; + case CEPH_MSG_MON_MAP: return "mon_map"; + case CEPH_MSG_MON_GET_MAP: return "mon_get_map"; + case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe"; + case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack"; + case CEPH_MSG_CLIENT_MOUNT: return "client_mount"; + case CEPH_MSG_CLIENT_MOUNT_ACK: return "client_mount_ack"; + case CEPH_MSG_STATFS: return "statfs"; + case CEPH_MSG_STATFS_REPLY: return "statfs_reply"; + case CEPH_MSG_MDS_GETMAP: return "mds_getmap"; + case CEPH_MSG_MDS_MAP: return "mds_map"; + case CEPH_MSG_CLIENT_SESSION: return "client_session"; + case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect"; + case CEPH_MSG_CLIENT_REQUEST: return "client_request"; + case CEPH_MSG_CLIENT_REQUEST_FORWARD: return "client_request_forward"; + case CEPH_MSG_CLIENT_REPLY: return "client_reply"; + case CEPH_MSG_CLIENT_CAPS: return "client_caps"; + case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release"; + case CEPH_MSG_CLIENT_SNAP: return "client_snap"; + case CEPH_MSG_CLIENT_LEASE: return "client_lease"; + case CEPH_MSG_OSD_GETMAP: return "osd_getmap"; + case CEPH_MSG_OSD_MAP: return "osd_map"; + case CEPH_MSG_OSD_OP: return "osd_op"; + case CEPH_MSG_OSD_OPREPLY: return "osd_opreply"; + default: return "unknown"; + } +} + + +/* + * mount options + */ +enum { + Opt_fsidmajor, + Opt_fsidminor, + Opt_monport, + Opt_wsize, + Opt_rsize, + Opt_osdtimeout, + Opt_mount_timeout, + Opt_caps_wanted_delay_min, + Opt_caps_wanted_delay_max, + Opt_readdir_max_entries, + /* int args above */ + Opt_snapdirname, + Opt_secret, + /* string args above */ + Opt_ip, + Opt_noshare, + Opt_dirstat, + Opt_nodirstat, + Opt_rbytes, + Opt_norbytes, + Opt_nocrc, + Opt_noasyncreaddir, +}; + +static match_table_t arg_tokens = { + {Opt_fsidmajor, "fsidmajor=%ld"}, + {Opt_fsidminor, "fsidminor=%ld"}, + {Opt_monport, "monport=%d"}, + {Opt_wsize, "wsize=%d"}, + {Opt_rsize, "rsize=%d"}, + {Opt_osdtimeout, "osdtimeout=%d"}, + {Opt_mount_timeout, "mount_timeout=%d"}, + {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"}, + {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"}, + {Opt_readdir_max_entries, "readdir_max_entries=%d"}, + /* int args above */ + {Opt_snapdirname, "snapdirname=%s"}, + {Opt_secret, "secret=%s"}, + /* string args above */ + {Opt_ip, "ip=%s"}, + {Opt_noshare, "noshare"}, + {Opt_dirstat, "dirstat"}, + {Opt_nodirstat, "nodirstat"}, + {Opt_rbytes, "rbytes"}, + {Opt_norbytes, "norbytes"}, + {Opt_nocrc, "nocrc"}, + {Opt_noasyncreaddir, "noasyncreaddir"}, + {-1, NULL} +}; + + +static int parse_mount_args(struct ceph_client *client, + int flags, char *options, const char *dev_name, + const char **path) +{ + struct ceph_mount_args *args = &client->mount_args; + const char *c; + int err; + substring_t argstr[MAX_OPT_ARGS]; + int num_mon; + struct ceph_entity_addr 
mon_addr[CEPH_MAX_MON_MOUNT_ADDR]; + int i; + + dout("parse_mount_args dev_name '%s'\n", dev_name); + memset(args, 0, sizeof(*args)); + + /* start with defaults */ + args->sb_flags = flags; + args->flags = CEPH_OPT_DEFAULT; + args->osd_timeout = 5; /* seconds */ + args->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */ + args->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT; + args->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT; + args->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL); + args->cap_release_safety = CEPH_CAPS_PER_RELEASE * 4; + args->max_readdir = 1024; + + /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */ + if (!dev_name) + return -EINVAL; + *path = strstr(dev_name, ":/"); + if (*path == NULL) { + pr_err("device name is missing path (no :/ in %s)\n", + dev_name); + return -EINVAL; + } + + /* get mon ip(s) */ + err = ceph_parse_ips(dev_name, *path, mon_addr, + CEPH_MAX_MON_MOUNT_ADDR, &num_mon); + if (err < 0) + return err; + + /* build initial monmap */ + client->monc.monmap = kzalloc(sizeof(*client->monc.monmap) + + num_mon*sizeof(client->monc.monmap->mon_inst[0]), + GFP_KERNEL); + if (!client->monc.monmap) + return -ENOMEM; + for (i = 0; i < num_mon; i++) { + client->monc.monmap->mon_inst[i].addr = mon_addr[i]; + client->monc.monmap->mon_inst[i].addr.erank = 0; + client->monc.monmap->mon_inst[i].addr.nonce = 0; + client->monc.monmap->mon_inst[i].name.type = + CEPH_ENTITY_TYPE_MON; + client->monc.monmap->mon_inst[i].name.num = cpu_to_le64(i); + } + client->monc.monmap->num_mon = num_mon; + memset(&args->my_addr.in_addr, 0, sizeof(args->my_addr.in_addr)); + + /* path on server */ + *path += 2; + dout("server path '%s'\n", *path); + + /* parse mount options */ + while ((c = strsep(&options, ",")) != NULL) { + int token, intval, ret; + if (!*c) + continue; + token = match_token((char *)c, arg_tokens, argstr); + if (token < 0) { + pr_err("bad mount option at '%s'\n", c); + return -EINVAL; + + } + if (token < Opt_ip) { + ret = match_int(&argstr[0], &intval); + if (ret < 0) { + pr_err("bad mount option arg (not int) " + "at '%s'\n", c); + continue; + } + dout("got token %d intval %d\n", token, intval); + } + switch (token) { + case Opt_fsidmajor: + *(__le64 *)&args->fsid.fsid[0] = cpu_to_le64(intval); + break; + case Opt_fsidminor: + *(__le64 *)&args->fsid.fsid[8] = cpu_to_le64(intval); + break; + case Opt_ip: + err = ceph_parse_ips(argstr[0].from, + argstr[0].to, + &args->my_addr, + 1, NULL); + if (err < 0) + return err; + args->flags |= CEPH_OPT_MYIP; + break; + + case Opt_snapdirname: + kfree(args->snapdir_name); + args->snapdir_name = kstrndup(argstr[0].from, + argstr[0].to-argstr[0].from, + GFP_KERNEL); + break; + case Opt_secret: + args->secret = kstrndup(argstr[0].from, + argstr[0].to-argstr[0].from, + GFP_KERNEL); + break; + + /* misc */ + case Opt_wsize: + args->wsize = intval; + break; + case Opt_rsize: + args->rsize = intval; + break; + case Opt_osdtimeout: + args->osd_timeout = intval; + break; + case Opt_mount_timeout: + args->mount_timeout = intval; + break; + case Opt_caps_wanted_delay_min: + args->caps_wanted_delay_min = intval; + break; + case Opt_caps_wanted_delay_max: + args->caps_wanted_delay_max = intval; + break; + case Opt_readdir_max_entries: + args->max_readdir = intval; + break; + + case Opt_noshare: + args->flags |= CEPH_OPT_NOSHARE; + break; + + case Opt_dirstat: + args->flags |= CEPH_OPT_DIRSTAT; + break; + case Opt_nodirstat: + args->flags &= ~CEPH_OPT_DIRSTAT; + break; + case Opt_rbytes: + args->flags |= 
CEPH_OPT_RBYTES; + break; + case Opt_norbytes: + args->flags &= ~CEPH_OPT_RBYTES; + break; + case Opt_nocrc: + args->flags |= CEPH_OPT_NOCRC; + break; + case Opt_noasyncreaddir: + args->flags |= CEPH_OPT_NOASYNCREADDIR; + break; + + default: + BUG_ON(token); + } + } + + return 0; +} + +static void release_mount_args(struct ceph_mount_args *args) +{ + kfree(args->snapdir_name); + args->snapdir_name = NULL; + kfree(args->secret); + args->secret = NULL; +} + +/* + * create a fresh client instance + */ +static struct ceph_client *ceph_create_client(void) +{ + struct ceph_client *client; + int err = -ENOMEM; + + client = kzalloc(sizeof(*client), GFP_KERNEL); + if (client == NULL) + return ERR_PTR(-ENOMEM); + + mutex_init(&client->mount_mutex); + + init_waitqueue_head(&client->mount_wq); + + client->sb = NULL; + client->mount_state = CEPH_MOUNT_MOUNTING; + client->whoami = -1; + + client->msgr = NULL; + + client->mount_err = 0; + client->signed_ticket = NULL; + client->signed_ticket_len = 0; + + err = -ENOMEM; + client->wb_wq = create_workqueue("ceph-writeback"); + if (client->wb_wq == NULL) + goto fail; + client->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid"); + if (client->pg_inv_wq == NULL) + goto fail_wb_wq; + client->trunc_wq = create_singlethread_workqueue("ceph-trunc"); + if (client->trunc_wq == NULL) + goto fail_pg_inv_wq; + + /* subsystems */ + err = ceph_monc_init(&client->monc, client); + if (err < 0) + goto fail_trunc_wq; + err = ceph_osdc_init(&client->osdc, client); + if (err < 0) + goto fail_monc; + ceph_mdsc_init(&client->mdsc, client); + return client; + +fail_monc: + ceph_monc_stop(&client->monc); +fail_trunc_wq: + destroy_workqueue(client->trunc_wq); +fail_pg_inv_wq: + destroy_workqueue(client->pg_inv_wq); +fail_wb_wq: + destroy_workqueue(client->wb_wq); +fail: + kfree(client); + return ERR_PTR(err); +} + +static void ceph_destroy_client(struct ceph_client *client) +{ + dout("destroy_client %p\n", client); + + /* unmount */ + ceph_mdsc_stop(&client->mdsc); + ceph_monc_stop(&client->monc); + ceph_osdc_stop(&client->osdc); + + kfree(client->signed_ticket); + + ceph_debugfs_client_cleanup(client); + destroy_workqueue(client->wb_wq); + destroy_workqueue(client->pg_inv_wq); + destroy_workqueue(client->trunc_wq); + + if (client->msgr) + ceph_messenger_destroy(client->msgr); + if (client->wb_pagevec_pool) + mempool_destroy(client->wb_pagevec_pool); + + release_mount_args(&client->mount_args); + + kfree(client); + dout("destroy_client %p done\n", client); +} + +/* + * true if we have the mon map (and have thus joined the cluster) + */ +static int have_mon_map(struct ceph_client *client) +{ + return client->monc.monmap && client->monc.monmap->epoch; +} + +/* + * Bootstrap mount by opening the root directory. Note the mount + * @started time from caller, and time out if this takes too long. 
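+ *
+ * This issues a GETATTR for @path relative to the root inode; ceph_mount()
+ * calls it first with "" to instantiate the filesystem root and then, for
+ * subdir mounts, again with the path that follows ":/" in the device name
+ * to find the dentry to attach to the vfsmount.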
+ */ +static struct dentry *open_root_dentry(struct ceph_client *client, + const char *path, + unsigned long started) +{ + struct ceph_mds_client *mdsc = &client->mdsc; + struct ceph_mds_request *req = NULL; + int err; + struct dentry *root; + + /* open dir */ + dout("open_root_inode opening '%s'\n", path); + req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); + if (IS_ERR(req)) + return ERR_PTR(PTR_ERR(req)); + req->r_path1 = kstrdup(path, GFP_NOFS); + req->r_ino1.ino = CEPH_INO_ROOT; + req->r_ino1.snap = CEPH_NOSNAP; + req->r_started = started; + req->r_timeout = client->mount_args.mount_timeout * HZ; + req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE); + req->r_num_caps = 2; + err = ceph_mdsc_do_request(mdsc, NULL, req); + if (err == 0) { + dout("open_root_inode success\n"); + if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT && + client->sb->s_root == NULL) + root = d_alloc_root(req->r_target_inode); + else + root = d_obtain_alias(req->r_target_inode); + req->r_target_inode = NULL; + dout("open_root_inode success, root dentry is %p\n", root); + } else { + root = ERR_PTR(err); + } + ceph_mdsc_put_request(req); + return root; +} + +/* + * mount: join the ceph cluster, and open root directory. + */ +static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt, + const char *path) +{ + struct ceph_entity_addr *myaddr = NULL; + int err; + unsigned long timeout = client->mount_args.mount_timeout * HZ; + unsigned long started = jiffies; /* note the start time */ + struct dentry *root; + + dout("mount start\n"); + mutex_lock(&client->mount_mutex); + + /* initialize the messenger */ + if (client->msgr == NULL) { + if (ceph_test_opt(client, MYIP)) + myaddr = &client->mount_args.my_addr; + client->msgr = ceph_messenger_create(myaddr); + if (IS_ERR(client->msgr)) { + err = PTR_ERR(client->msgr); + client->msgr = NULL; + goto out; + } + client->msgr->nocrc = ceph_test_opt(client, NOCRC); + } + + /* send mount request, and wait for mon, mds, and osd maps */ + err = ceph_monc_request_mount(&client->monc); + if (err < 0) + goto out; + + while (!have_mon_map(client) && !client->mount_err) { + err = -EIO; + if (timeout && time_after_eq(jiffies, started + timeout)) + goto out; + + /* wait */ + dout("mount waiting for mount\n"); + err = wait_event_interruptible_timeout(client->mount_wq, + client->mount_err || have_mon_map(client), + timeout); + if (err == -EINTR || err == -ERESTARTSYS) + goto out; + if (client->mount_err) { + err = client->mount_err; + goto out; + } + } + + dout("mount opening root\n"); + root = open_root_dentry(client, "", started); + if (IS_ERR(root)) { + err = PTR_ERR(root); + goto out; + } + if (client->sb->s_root) + dput(root); + else + client->sb->s_root = root; + + if (path[0] == 0) { + dget(root); + } else { + dout("mount opening base mountpoint\n"); + root = open_root_dentry(client, path, started); + if (IS_ERR(root)) { + err = PTR_ERR(root); + dput(client->sb->s_root); + client->sb->s_root = NULL; + goto out; + } + } + + mnt->mnt_root = root; + mnt->mnt_sb = client->sb; + + client->mount_state = CEPH_MOUNT_MOUNTED; + dout("mount success\n"); + err = 0; + +out: + mutex_unlock(&client->mount_mutex); + return err; +} + +static int ceph_set_super(struct super_block *s, void *data) +{ + struct ceph_client *client = data; + int ret; + + dout("set_super %p data %p\n", s, data); + + s->s_flags = client->mount_args.sb_flags; + s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */ + + s->s_fs_info = client; + client->sb = s; + + s->s_op 
= &ceph_super_ops; + s->s_export_op = &ceph_export_ops; + + s->s_time_gran = 1000; /* 1000 ns == 1 us */ + + ret = set_anon_super(s, NULL); /* what is that second arg for? */ + if (ret != 0) + goto fail; + + return ret; + +fail: + s->s_fs_info = NULL; + client->sb = NULL; + return ret; +} + +/* + * share superblock if same fs AND options + */ +static int ceph_compare_super(struct super_block *sb, void *data) +{ + struct ceph_client *new = data; + struct ceph_mount_args *args = &new->mount_args; + struct ceph_client *other = ceph_sb_to_client(sb); + int i; + + dout("ceph_compare_super %p\n", sb); + if (args->flags & CEPH_OPT_FSID) { + if (ceph_fsid_compare(&args->fsid, &other->fsid)) { + dout("fsid doesn't match\n"); + return 0; + } + } else { + /* do we share (a) monitor? */ + for (i = 0; i < new->monc.monmap->num_mon; i++) + if (ceph_monmap_contains(other->monc.monmap, + &new->monc.monmap->mon_inst[i].addr)) + break; + if (i == new->monc.monmap->num_mon) { + dout("mon ip not part of monmap\n"); + return 0; + } + dout("mon ip matches existing sb %p\n", sb); + } + if (args->sb_flags != other->mount_args.sb_flags) { + dout("flags differ\n"); + return 0; + } + return 1; +} + +/* + * construct our own bdi so we can control readahead, etc. + */ +static int ceph_init_bdi(struct super_block *sb, struct ceph_client *client) +{ + int err; + + err = bdi_init(&client->backing_dev_info); + if (err < 0) + return err; + + /* set ra_pages based on rsize mount option? */ + if (client->mount_args.rsize >= PAGE_CACHE_SIZE) + client->backing_dev_info.ra_pages = + (client->mount_args.rsize + PAGE_CACHE_SIZE - 1) + >> PAGE_SHIFT; + + err = bdi_register_dev(&client->backing_dev_info, sb->s_dev); + return err; +} + +static int ceph_get_sb(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data, + struct vfsmount *mnt) +{ + struct super_block *sb; + struct ceph_client *client; + int err; + int (*compare_super)(struct super_block *, void *) = ceph_compare_super; + const char *path; + + dout("ceph_get_sb\n"); + + /* create client (which we may/may not use) */ + client = ceph_create_client(); + if (IS_ERR(client)) + return PTR_ERR(client); + + err = parse_mount_args(client, flags, data, dev_name, &path); + if (err < 0) + goto out; + + if (client->mount_args.flags & CEPH_OPT_NOSHARE) + compare_super = NULL; + sb = sget(fs_type, compare_super, ceph_set_super, client); + if (IS_ERR(sb)) { + err = PTR_ERR(sb); + goto out; + } + + if (ceph_client(sb) != client) { + ceph_destroy_client(client); + client = ceph_client(sb); + dout("get_sb got existing client %p\n", client); + } else { + dout("get_sb using new client %p\n", client); + + /* set up mempools */ + err = -ENOMEM; + client->wb_pagevec_pool = mempool_create_kmalloc_pool(10, + client->mount_args.wsize >> PAGE_CACHE_SHIFT); + if (!client->wb_pagevec_pool) + goto out_splat; + + err = ceph_init_bdi(sb, client); + if (err < 0) + goto out_splat; + } + + err = ceph_mount(client, mnt, path); + if (err < 0) + goto out_splat; + dout("root %p inode %p ino %llx.%llx\n", mnt->mnt_root, + mnt->mnt_root->d_inode, ceph_vinop(mnt->mnt_root->d_inode)); + return 0; + +out_splat: + ceph_mdsc_close_sessions(&client->mdsc); + up_write(&sb->s_umount); + deactivate_super(sb); + goto out_final; + +out: + ceph_destroy_client(client); +out_final: + dout("ceph_get_sb fail %d\n", err); + return err; +} + +static void ceph_kill_sb(struct super_block *s) +{ + struct ceph_client *client = ceph_sb_to_client(s); + dout("kill_sb %p\n", s); + 
ceph_mdsc_pre_umount(&client->mdsc); + bdi_unregister(&client->backing_dev_info); + kill_anon_super(s); /* will call put_super after sb is r/o */ + bdi_destroy(&client->backing_dev_info); + ceph_destroy_client(client); +} + +static struct file_system_type ceph_fs_type = { + .owner = THIS_MODULE, + .name = "ceph", + .get_sb = ceph_get_sb, + .kill_sb = ceph_kill_sb, + .fs_flags = FS_RENAME_DOES_D_MOVE, +}; + +#define _STRINGIFY(x) #x +#define STRINGIFY(x) _STRINGIFY(x) + +static int __init init_ceph(void) +{ + int ret = 0; + + ret = ceph_debugfs_init(); + if (ret < 0) + goto out; + + ret = ceph_msgr_init(); + if (ret < 0) + goto out_debugfs; + + ret = init_caches(); + if (ret) + goto out_msgr; + + ceph_caps_init(); + + ret = register_filesystem(&ceph_fs_type); + if (ret) + goto out_icache; + + pr_info("loaded (%s)\n", STRINGIFY(CEPH_GIT_VER)); + return 0; + +out_icache: + destroy_caches(); +out_msgr: + ceph_msgr_exit(); +out_debugfs: + ceph_debugfs_cleanup(); +out: + return ret; +} + +static void __exit exit_ceph(void) +{ + dout("exit_ceph\n"); + unregister_filesystem(&ceph_fs_type); + ceph_caps_finalize(); + destroy_caches(); + ceph_msgr_exit(); + ceph_debugfs_cleanup(); +} + +module_init(init_ceph); +module_exit(exit_ceph); + +MODULE_AUTHOR("Sage Weil "); +MODULE_AUTHOR("Yehuda Sadeh "); +MODULE_AUTHOR("Patience Warnick "); +MODULE_DESCRIPTION("Ceph filesystem for Linux"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 355da1eb7a1f91c276b991764e951bbcd8047599 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:08 -0700 Subject: ceph: inode operations Inode cache and inode operations. We also include routines to incorporate metadata structures returned by the MDS into the client cache, and some helpers to deal with file capabilities and metadata leases. The bulk of that work is done by fill_inode() and fill_trace(). Signed-off-by: Sage Weil --- fs/ceph/inode.c | 1620 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/xattr.c | 833 ++++++++++++++++++++++++++++ 2 files changed, 2453 insertions(+) create mode 100644 fs/ceph/inode.c create mode 100644 fs/ceph/xattr.c diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c new file mode 100644 index 000000000000..6097af790047 --- /dev/null +++ b/fs/ceph/inode.c @@ -0,0 +1,1620 @@ +#include "ceph_debug.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "super.h" +#include "decode.h" + +/* + * Ceph inode operations + * + * Implement basic inode helpers (get, alloc) and inode ops (getattr, + * setattr, etc.), xattr helpers, and helpers for assimilating + * metadata returned by the MDS into our cache. + * + * Also define helpers for doing asynchronous writeback, invalidation, + * and truncation for the benefit of those who can't afford to block + * (typically because they are in the message handler path). 
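+ *
+ * The asynchronous variants just queue work (i_wb_work, i_pg_inv_work,
+ * i_vmtruncate_work) on the client's wb_wq, pg_inv_wq, and trunc_wq
+ * workqueues set up in super.c.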
+ */ + +static const struct inode_operations ceph_symlink_iops; + +static void ceph_inode_invalidate_pages(struct work_struct *work); + +/* + * find or create an inode, given the ceph ino number + */ +struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino) +{ + struct inode *inode; + ino_t t = ceph_vino_to_ino(vino); + + inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino); + if (inode == NULL) + return ERR_PTR(-ENOMEM); + if (inode->i_state & I_NEW) { + dout("get_inode created new inode %p %llx.%llx ino %llx\n", + inode, ceph_vinop(inode), (u64)inode->i_ino); + unlock_new_inode(inode); + } + + dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino, + vino.snap, inode); + return inode; +} + +/* + * get/constuct snapdir inode for a given directory + */ +struct inode *ceph_get_snapdir(struct inode *parent) +{ + struct ceph_vino vino = { + .ino = ceph_ino(parent), + .snap = CEPH_SNAPDIR, + }; + struct inode *inode = ceph_get_inode(parent->i_sb, vino); + + BUG_ON(!S_ISDIR(parent->i_mode)); + if (IS_ERR(inode)) + return ERR_PTR(PTR_ERR(inode)); + inode->i_mode = parent->i_mode; + inode->i_uid = parent->i_uid; + inode->i_gid = parent->i_gid; + inode->i_op = &ceph_dir_iops; + inode->i_fop = &ceph_dir_fops; + ceph_inode(inode)->i_snap_caps = CEPH_CAP_PIN; /* so we can open */ + return inode; +} + +const struct inode_operations ceph_file_iops = { + .permission = ceph_permission, + .setattr = ceph_setattr, + .getattr = ceph_getattr, + .setxattr = ceph_setxattr, + .getxattr = ceph_getxattr, + .listxattr = ceph_listxattr, + .removexattr = ceph_removexattr, +}; + + +/* + * We use a 'frag tree' to keep track of the MDS's directory fragments + * for a given inode (usually there is just a single fragment). We + * need to know when a child frag is delegated to a new MDS, or when + * it is flagged as replicated, so we can direct our requests + * accordingly. + */ + +/* + * find/create a frag in the tree + */ +static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci, + u32 f) +{ + struct rb_node **p; + struct rb_node *parent = NULL; + struct ceph_inode_frag *frag; + int c; + + p = &ci->i_fragtree.rb_node; + while (*p) { + parent = *p; + frag = rb_entry(parent, struct ceph_inode_frag, node); + c = ceph_frag_compare(f, frag->frag); + if (c < 0) + p = &(*p)->rb_left; + else if (c > 0) + p = &(*p)->rb_right; + else + return frag; + } + + frag = kmalloc(sizeof(*frag), GFP_NOFS); + if (!frag) { + pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx " + "frag %x\n", &ci->vfs_inode, + ceph_vinop(&ci->vfs_inode), f); + return ERR_PTR(-ENOMEM); + } + frag->frag = f; + frag->split_by = 0; + frag->mds = -1; + frag->ndist = 0; + + rb_link_node(&frag->node, parent, p); + rb_insert_color(&frag->node, &ci->i_fragtree); + + dout("get_or_create_frag added %llx.%llx frag %x\n", + ceph_vinop(&ci->vfs_inode), f); + return frag; +} + +/* + * find a specific frag @f + */ +struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f) +{ + struct rb_node *n = ci->i_fragtree.rb_node; + + while (n) { + struct ceph_inode_frag *frag = + rb_entry(n, struct ceph_inode_frag, node); + int c = ceph_frag_compare(f, frag->frag); + if (c < 0) + n = n->rb_left; + else if (c > 0) + n = n->rb_right; + else + return frag; + } + return NULL; +} + +/* + * Choose frag containing the given value @v. If @pfrag is + * specified, copy the frag delegation info to the caller if + * it is present. 
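+ *
+ * We walk down from the root frag: e.g. if the root has split_by == 1,
+ * hash values with the top bit of the 24-bit space clear fall into one
+ * child and the rest into the other; we keep descending until we reach
+ * a frag with no further split recorded.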
+ */ +u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v, + struct ceph_inode_frag *pfrag, + int *found) +{ + u32 t = ceph_frag_make(0, 0); + struct ceph_inode_frag *frag; + unsigned nway, i; + u32 n; + + if (found) + *found = 0; + + mutex_lock(&ci->i_fragtree_mutex); + while (1) { + WARN_ON(!ceph_frag_contains_value(t, v)); + frag = __ceph_find_frag(ci, t); + if (!frag) + break; /* t is a leaf */ + if (frag->split_by == 0) { + if (pfrag) + memcpy(pfrag, frag, sizeof(*pfrag)); + if (found) + *found = 1; + break; + } + + /* choose child */ + nway = 1 << frag->split_by; + dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t, + frag->split_by, nway); + for (i = 0; i < nway; i++) { + n = ceph_frag_make_child(t, frag->split_by, i); + if (ceph_frag_contains_value(n, v)) { + t = n; + break; + } + } + BUG_ON(i == nway); + } + dout("choose_frag(%x) = %x\n", v, t); + + mutex_unlock(&ci->i_fragtree_mutex); + return t; +} + +/* + * Process dirfrag (delegation) info from the mds. Include leaf + * fragment in tree ONLY if ndist > 0. Otherwise, only + * branches/splits are included in i_fragtree) + */ +static int ceph_fill_dirfrag(struct inode *inode, + struct ceph_mds_reply_dirfrag *dirinfo) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_inode_frag *frag; + u32 id = le32_to_cpu(dirinfo->frag); + int mds = le32_to_cpu(dirinfo->auth); + int ndist = le32_to_cpu(dirinfo->ndist); + int i; + int err = 0; + + mutex_lock(&ci->i_fragtree_mutex); + if (ndist == 0) { + /* no delegation info needed. */ + frag = __ceph_find_frag(ci, id); + if (!frag) + goto out; + if (frag->split_by == 0) { + /* tree leaf, remove */ + dout("fill_dirfrag removed %llx.%llx frag %x" + " (no ref)\n", ceph_vinop(inode), id); + rb_erase(&frag->node, &ci->i_fragtree); + kfree(frag); + } else { + /* tree branch, keep and clear */ + dout("fill_dirfrag cleared %llx.%llx frag %x" + " referral\n", ceph_vinop(inode), id); + frag->mds = -1; + frag->ndist = 0; + } + goto out; + } + + + /* find/add this frag to store mds delegation info */ + frag = __get_or_create_frag(ci, id); + if (IS_ERR(frag)) { + /* this is not the end of the world; we can continue + with bad/inaccurate delegation info */ + pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n", + ceph_vinop(inode), le32_to_cpu(dirinfo->frag)); + err = -ENOMEM; + goto out; + } + + frag->mds = mds; + frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP); + for (i = 0; i < frag->ndist; i++) + frag->dist[i] = le32_to_cpu(dirinfo->dist[i]); + dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n", + ceph_vinop(inode), frag->frag, frag->ndist); + +out: + mutex_unlock(&ci->i_fragtree_mutex); + return err; +} + + +/* + * initialize a newly allocated inode. 
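+ * (This is our alloc_inode super operation; everything except the embedded
+ * VFS inode, which the slab constructor handles via inode_init_once(), is
+ * initialized here on every allocation.)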
+ */ +struct inode *ceph_alloc_inode(struct super_block *sb) +{ + struct ceph_inode_info *ci; + int i; + + ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS); + if (!ci) + return NULL; + + dout("alloc_inode %p\n", &ci->vfs_inode); + + ci->i_version = 0; + ci->i_time_warp_seq = 0; + ci->i_ceph_flags = 0; + ci->i_release_count = 0; + ci->i_symlink = NULL; + + ci->i_fragtree = RB_ROOT; + mutex_init(&ci->i_fragtree_mutex); + + ci->i_xattrs.blob = NULL; + ci->i_xattrs.prealloc_blob = NULL; + ci->i_xattrs.dirty = false; + ci->i_xattrs.index = RB_ROOT; + ci->i_xattrs.count = 0; + ci->i_xattrs.names_size = 0; + ci->i_xattrs.vals_size = 0; + ci->i_xattrs.version = 0; + ci->i_xattrs.index_version = 0; + + ci->i_caps = RB_ROOT; + ci->i_auth_cap = NULL; + ci->i_dirty_caps = 0; + ci->i_flushing_caps = 0; + INIT_LIST_HEAD(&ci->i_dirty_item); + INIT_LIST_HEAD(&ci->i_flushing_item); + ci->i_cap_flush_seq = 0; + ci->i_cap_flush_last_tid = 0; + memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid)); + init_waitqueue_head(&ci->i_cap_wq); + ci->i_hold_caps_min = 0; + ci->i_hold_caps_max = 0; + INIT_LIST_HEAD(&ci->i_cap_delay_list); + ci->i_cap_exporting_mds = 0; + ci->i_cap_exporting_mseq = 0; + ci->i_cap_exporting_issued = 0; + INIT_LIST_HEAD(&ci->i_cap_snaps); + ci->i_head_snapc = NULL; + ci->i_snap_caps = 0; + + for (i = 0; i < CEPH_FILE_MODE_NUM; i++) + ci->i_nr_by_mode[i] = 0; + + ci->i_truncate_seq = 0; + ci->i_truncate_size = 0; + ci->i_truncate_pending = 0; + + ci->i_max_size = 0; + ci->i_reported_size = 0; + ci->i_wanted_max_size = 0; + ci->i_requested_max_size = 0; + + ci->i_pin_ref = 0; + ci->i_rd_ref = 0; + ci->i_rdcache_ref = 0; + ci->i_wr_ref = 0; + ci->i_wrbuffer_ref = 0; + ci->i_wrbuffer_ref_head = 0; + ci->i_shared_gen = 0; + ci->i_rdcache_gen = 0; + ci->i_rdcache_revoking = 0; + + INIT_LIST_HEAD(&ci->i_unsafe_writes); + INIT_LIST_HEAD(&ci->i_unsafe_dirops); + spin_lock_init(&ci->i_unsafe_lock); + + ci->i_snap_realm = NULL; + INIT_LIST_HEAD(&ci->i_snap_realm_item); + INIT_LIST_HEAD(&ci->i_snap_flush_item); + + INIT_WORK(&ci->i_wb_work, ceph_inode_writeback); + INIT_WORK(&ci->i_pg_inv_work, ceph_inode_invalidate_pages); + + INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work); + + return &ci->vfs_inode; +} + +void ceph_destroy_inode(struct inode *inode) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_inode_frag *frag; + struct rb_node *n; + + dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode)); + + ceph_queue_caps_release(inode); + + kfree(ci->i_symlink); + while ((n = rb_first(&ci->i_fragtree)) != NULL) { + frag = rb_entry(n, struct ceph_inode_frag, node); + rb_erase(n, &ci->i_fragtree); + kfree(frag); + } + + __ceph_destroy_xattrs(ci); + ceph_buffer_put(ci->i_xattrs.blob); + ceph_buffer_put(ci->i_xattrs.prealloc_blob); + + kmem_cache_free(ceph_inode_cachep, ci); +} + + +/* + * Helpers to fill in size, ctime, mtime, and atime. We have to be + * careful because either the client or MDS may have more up to date + * info, depending on which capabilities are held, and whether + * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime + * and size are monotonically increasing, except when utimes() or + * truncate() increments the corresponding _seq values.) 
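+ *
+ * ceph_fill_file_size returns nonzero when the caller should queue a
+ * vmtruncate, i.e. truncate_seq advanced while we hold caps that may
+ * imply locally cached data.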
+ */ +int ceph_fill_file_size(struct inode *inode, int issued, + u32 truncate_seq, u64 truncate_size, u64 size) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + int queue_trunc = 0; + + if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 || + (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) { + dout("size %lld -> %llu\n", inode->i_size, size); + inode->i_size = size; + inode->i_blocks = (size + (1<<9) - 1) >> 9; + ci->i_reported_size = size; + if (truncate_seq != ci->i_truncate_seq) { + dout("truncate_seq %u -> %u\n", + ci->i_truncate_seq, truncate_seq); + ci->i_truncate_seq = truncate_seq; + if (issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD| + CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER| + CEPH_CAP_FILE_EXCL)) { + ci->i_truncate_pending++; + queue_trunc = 1; + } + } + } + if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 && + ci->i_truncate_size != truncate_size) { + dout("truncate_size %lld -> %llu\n", ci->i_truncate_size, + truncate_size); + ci->i_truncate_size = truncate_size; + } + return queue_trunc; +} + +void ceph_fill_file_time(struct inode *inode, int issued, + u64 time_warp_seq, struct timespec *ctime, + struct timespec *mtime, struct timespec *atime) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + int warn = 0; + + if (issued & (CEPH_CAP_FILE_EXCL| + CEPH_CAP_FILE_WR| + CEPH_CAP_FILE_BUFFER)) { + if (timespec_compare(ctime, &inode->i_ctime) > 0) { + dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", + inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, + ctime->tv_sec, ctime->tv_nsec); + inode->i_ctime = *ctime; + } + if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { + /* the MDS did a utimes() */ + dout("mtime %ld.%09ld -> %ld.%09ld " + "tw %d -> %d\n", + inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec, + mtime->tv_sec, mtime->tv_nsec, + ci->i_time_warp_seq, (int)time_warp_seq); + + inode->i_mtime = *mtime; + inode->i_atime = *atime; + ci->i_time_warp_seq = time_warp_seq; + } else if (time_warp_seq == ci->i_time_warp_seq) { + /* nobody did utimes(); take the max */ + if (timespec_compare(mtime, &inode->i_mtime) > 0) { + dout("mtime %ld.%09ld -> %ld.%09ld inc\n", + inode->i_mtime.tv_sec, + inode->i_mtime.tv_nsec, + mtime->tv_sec, mtime->tv_nsec); + inode->i_mtime = *mtime; + } + if (timespec_compare(atime, &inode->i_atime) > 0) { + dout("atime %ld.%09ld -> %ld.%09ld inc\n", + inode->i_atime.tv_sec, + inode->i_atime.tv_nsec, + atime->tv_sec, atime->tv_nsec); + inode->i_atime = *atime; + } + } else if (issued & CEPH_CAP_FILE_EXCL) { + /* we did a utimes(); ignore mds values */ + } else { + warn = 1; + } + } else { + /* we have no write caps; whatever the MDS says is true */ + if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) { + inode->i_ctime = *ctime; + inode->i_mtime = *mtime; + inode->i_atime = *atime; + ci->i_time_warp_seq = time_warp_seq; + } else { + warn = 1; + } + } + if (warn) /* time_warp_seq shouldn't go backwards */ + dout("%p mds time_warp_seq %llu < %u\n", + inode, time_warp_seq, ci->i_time_warp_seq); +} + +/* + * Populate an inode based on info from mds. May be called on new or + * existing inodes. 
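+ * Returns 0 on success or a negative errno.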
+ */ +static int fill_inode(struct inode *inode, + struct ceph_mds_reply_info_in *iinfo, + struct ceph_mds_reply_dirfrag *dirinfo, + struct ceph_mds_session *session, + unsigned long ttl_from, int cap_fmode, + struct ceph_cap_reservation *caps_reservation) +{ + struct ceph_mds_reply_inode *info = iinfo->in; + struct ceph_inode_info *ci = ceph_inode(inode); + int i; + int issued, implemented; + struct timespec mtime, atime, ctime; + u32 nsplits; + struct ceph_buffer *xattr_blob = NULL; + int err = 0; + int queue_trunc = 0; + + dout("fill_inode %p ino %llx.%llx v %llu had %llu\n", + inode, ceph_vinop(inode), le64_to_cpu(info->version), + ci->i_version); + + /* + * prealloc xattr data, if it looks like we'll need it. only + * if len > 4 (meaning there are actually xattrs; the first 4 + * bytes are the xattr count). + */ + if (iinfo->xattr_len > 4) { + xattr_blob = ceph_buffer_new_alloc(iinfo->xattr_len, GFP_NOFS); + if (!xattr_blob) + pr_err("fill_inode ENOMEM xattr blob %d bytes\n", + iinfo->xattr_len); + } + + spin_lock(&inode->i_lock); + + /* + * provided version will be odd if inode value is projected, + * even if stable. skip the update if we have a newer info + * (e.g., due to inode info racing form multiple MDSs), or if + * we are getting projected (unstable) inode info. + */ + if (le64_to_cpu(info->version) > 0 && + (ci->i_version & ~1) > le64_to_cpu(info->version)) + goto no_change; + + issued = __ceph_caps_issued(ci, &implemented); + issued |= implemented | __ceph_caps_dirty(ci); + + /* update inode */ + ci->i_version = le64_to_cpu(info->version); + inode->i_version++; + inode->i_rdev = le32_to_cpu(info->rdev); + + if ((issued & CEPH_CAP_AUTH_EXCL) == 0) { + inode->i_mode = le32_to_cpu(info->mode); + inode->i_uid = le32_to_cpu(info->uid); + inode->i_gid = le32_to_cpu(info->gid); + dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode, + inode->i_uid, inode->i_gid); + } + + if ((issued & CEPH_CAP_LINK_EXCL) == 0) + inode->i_nlink = le32_to_cpu(info->nlink); + + /* be careful with mtime, atime, size */ + ceph_decode_timespec(&atime, &info->atime); + ceph_decode_timespec(&mtime, &info->mtime); + ceph_decode_timespec(&ctime, &info->ctime); + queue_trunc = ceph_fill_file_size(inode, issued, + le32_to_cpu(info->truncate_seq), + le64_to_cpu(info->truncate_size), + S_ISDIR(inode->i_mode) ? + ci->i_rbytes : + le64_to_cpu(info->size)); + ceph_fill_file_time(inode, issued, + le32_to_cpu(info->time_warp_seq), + &ctime, &mtime, &atime); + + ci->i_max_size = le64_to_cpu(info->max_size); + ci->i_layout = info->layout; + inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; + + /* xattrs */ + /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. 
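+ * The new blob only replaces the old one if the MDS reports a newer
+ * xattr_version and we do not hold the xattr EXCL cap.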
*/ + if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && + le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) { + if (ci->i_xattrs.blob) + ceph_buffer_put(ci->i_xattrs.blob); + ci->i_xattrs.blob = xattr_blob; + if (xattr_blob) + memcpy(ci->i_xattrs.blob->vec.iov_base, + iinfo->xattr_data, iinfo->xattr_len); + ci->i_xattrs.version = le64_to_cpu(info->xattr_version); + } + + inode->i_mapping->a_ops = &ceph_aops; + inode->i_mapping->backing_dev_info = + &ceph_client(inode->i_sb)->backing_dev_info; + + switch (inode->i_mode & S_IFMT) { + case S_IFIFO: + case S_IFBLK: + case S_IFCHR: + case S_IFSOCK: + init_special_inode(inode, inode->i_mode, inode->i_rdev); + inode->i_op = &ceph_file_iops; + break; + case S_IFREG: + inode->i_op = &ceph_file_iops; + inode->i_fop = &ceph_file_fops; + break; + case S_IFLNK: + inode->i_op = &ceph_symlink_iops; + if (!ci->i_symlink) { + int symlen = iinfo->symlink_len; + char *sym; + + BUG_ON(symlen != inode->i_size); + spin_unlock(&inode->i_lock); + + err = -ENOMEM; + sym = kmalloc(symlen+1, GFP_NOFS); + if (!sym) + goto out; + memcpy(sym, iinfo->symlink, symlen); + sym[symlen] = 0; + + spin_lock(&inode->i_lock); + if (!ci->i_symlink) + ci->i_symlink = sym; + else + kfree(sym); /* lost a race */ + } + break; + case S_IFDIR: + inode->i_op = &ceph_dir_iops; + inode->i_fop = &ceph_dir_fops; + + ci->i_files = le64_to_cpu(info->files); + ci->i_subdirs = le64_to_cpu(info->subdirs); + ci->i_rbytes = le64_to_cpu(info->rbytes); + ci->i_rfiles = le64_to_cpu(info->rfiles); + ci->i_rsubdirs = le64_to_cpu(info->rsubdirs); + ceph_decode_timespec(&ci->i_rctime, &info->rctime); + + /* set dir completion flag? */ + if (ci->i_files == 0 && ci->i_subdirs == 0 && + ceph_snap(inode) == CEPH_NOSNAP && + (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED)) { + dout(" marking %p complete (empty)\n", inode); + ci->i_ceph_flags |= CEPH_I_COMPLETE; + ci->i_max_offset = 2; + } + + /* it may be better to set st_size in getattr instead? */ + if (ceph_test_opt(ceph_client(inode->i_sb), RBYTES)) + inode->i_size = ci->i_rbytes; + break; + default: + pr_err("fill_inode %llx.%llx BAD mode 0%o\n", + ceph_vinop(inode), inode->i_mode); + } + +no_change: + spin_unlock(&inode->i_lock); + + /* queue truncate if we saw i_size decrease */ + if (queue_trunc) + if (queue_work(ceph_client(inode->i_sb)->trunc_wq, + &ci->i_vmtruncate_work)) + igrab(inode); + + /* populate frag tree */ + /* FIXME: move me up, if/when version reflects fragtree changes */ + nsplits = le32_to_cpu(info->fragtree.nsplits); + mutex_lock(&ci->i_fragtree_mutex); + for (i = 0; i < nsplits; i++) { + u32 id = le32_to_cpu(info->fragtree.splits[i].frag); + struct ceph_inode_frag *frag = __get_or_create_frag(ci, id); + + if (IS_ERR(frag)) + continue; + frag->split_by = le32_to_cpu(info->fragtree.splits[i].by); + dout(" frag %x split by %d\n", frag->frag, frag->split_by); + } + mutex_unlock(&ci->i_fragtree_mutex); + + /* were we issued a capability? 
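+ * If so, record it: a regular cap (via ceph_add_cap) for a NOSNAP
+ * inode, or a bit in i_snap_caps for a snapped inode.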
*/ + if (info->cap.caps) { + if (ceph_snap(inode) == CEPH_NOSNAP) { + ceph_add_cap(inode, session, + le64_to_cpu(info->cap.cap_id), + cap_fmode, + le32_to_cpu(info->cap.caps), + le32_to_cpu(info->cap.wanted), + le32_to_cpu(info->cap.seq), + le32_to_cpu(info->cap.mseq), + le64_to_cpu(info->cap.realm), + info->cap.flags, + caps_reservation); + } else { + spin_lock(&inode->i_lock); + dout(" %p got snap_caps %s\n", inode, + ceph_cap_string(le32_to_cpu(info->cap.caps))); + ci->i_snap_caps |= le32_to_cpu(info->cap.caps); + if (cap_fmode >= 0) + __ceph_get_fmode(ci, cap_fmode); + spin_unlock(&inode->i_lock); + } + } + + /* update delegation info? */ + if (dirinfo) + ceph_fill_dirfrag(inode, dirinfo); + + err = 0; + +out: + ceph_buffer_put(xattr_blob); + return err; +} + +/* + * caller should hold session s_mutex. + */ +static void update_dentry_lease(struct dentry *dentry, + struct ceph_mds_reply_lease *lease, + struct ceph_mds_session *session, + unsigned long from_time) +{ + struct ceph_dentry_info *di = ceph_dentry(dentry); + long unsigned duration = le32_to_cpu(lease->duration_ms); + long unsigned ttl = from_time + (duration * HZ) / 1000; + long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000; + struct inode *dir; + + /* only track leases on regular dentries */ + if (dentry->d_op != &ceph_dentry_ops) + return; + + spin_lock(&dentry->d_lock); + dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n", + dentry, le16_to_cpu(lease->mask), duration, ttl); + + /* make lease_rdcache_gen match directory */ + dir = dentry->d_parent->d_inode; + di->lease_shared_gen = ceph_inode(dir)->i_shared_gen; + + if (lease->mask == 0) + goto out_unlock; + + if (di->lease_gen == session->s_cap_gen && + time_before(ttl, dentry->d_time)) + goto out_unlock; /* we already have a newer lease. */ + + if (di->lease_session && di->lease_session != session) + goto out_unlock; + + ceph_dentry_lru_touch(dentry); + + if (!di->lease_session) + di->lease_session = ceph_get_mds_session(session); + di->lease_gen = session->s_cap_gen; + di->lease_seq = le32_to_cpu(lease->seq); + di->lease_renew_after = half_ttl; + di->lease_renew_from = 0; + dentry->d_time = ttl; +out_unlock: + spin_unlock(&dentry->d_lock); + return; +} + +/* + * splice a dentry to an inode. + * caller must hold directory i_mutex for this to be safe. + * + * we will only rehash the resulting dentry if @prehash is + * true; @prehash will be set to false (for the benefit of + * the caller) if we fail. + */ +static struct dentry *splice_dentry(struct dentry *dn, struct inode *in, + bool *prehash) +{ + struct dentry *realdn; + + /* dn must be unhashed */ + if (!d_unhashed(dn)) + d_drop(dn); + realdn = d_materialise_unique(dn, in); + if (IS_ERR(realdn)) { + pr_err("splice_dentry error %p inode %p ino %llx.%llx\n", + dn, in, ceph_vinop(in)); + if (prehash) + *prehash = false; /* don't rehash on error */ + dn = realdn; /* note realdn contains the error */ + goto out; + } else if (realdn) { + dout("dn %p (%d) spliced with %p (%d) " + "inode %p ino %llx.%llx\n", + dn, atomic_read(&dn->d_count), + realdn, atomic_read(&realdn->d_count), + realdn->d_inode, ceph_vinop(realdn->d_inode)); + dput(dn); + dn = realdn; + } else { + BUG_ON(!ceph_dentry(dn)); + + dout("dn %p attached to %p ino %llx.%llx\n", + dn, dn->d_inode, ceph_vinop(dn->d_inode)); + } + if ((!prehash || *prehash) && d_unhashed(dn)) + d_rehash(dn); +out: + return dn; +} + +/* + * Incorporate results into the local cache. 
This is either just + * one inode, or a directory, dentry, and possibly linked-to inode (e.g., + * after a lookup). + * + * A reply may contain + * a directory inode along with a dentry. + * and/or a target inode + * + * Called with snap_rwsem (read). + */ +int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, + struct ceph_mds_session *session) +{ + struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; + struct inode *in = NULL; + struct ceph_mds_reply_inode *ininfo; + struct ceph_vino vino; + int i = 0; + int err = 0; + + dout("fill_trace %p is_dentry %d is_target %d\n", req, + rinfo->head->is_dentry, rinfo->head->is_target); + +#if 0 + /* + * Debugging hook: + * + * If we resend completed ops to a recovering mds, we get no + * trace. Since that is very rare, pretend this is the case + * to ensure the 'no trace' handlers in the callers behave. + * + * Fill in inodes unconditionally to avoid breaking cap + * invariants. + */ + if (rinfo->head->op & CEPH_MDS_OP_WRITE) { + pr_info("fill_trace faking empty trace on %lld %s\n", + req->r_tid, ceph_mds_op_name(rinfo->head->op)); + if (rinfo->head->is_dentry) { + rinfo->head->is_dentry = 0; + err = fill_inode(req->r_locked_dir, + &rinfo->diri, rinfo->dirfrag, + session, req->r_request_started, -1); + } + if (rinfo->head->is_target) { + rinfo->head->is_target = 0; + ininfo = rinfo->targeti.in; + vino.ino = le64_to_cpu(ininfo->ino); + vino.snap = le64_to_cpu(ininfo->snapid); + in = ceph_get_inode(sb, vino); + err = fill_inode(in, &rinfo->targeti, NULL, + session, req->r_request_started, + req->r_fmode); + iput(in); + } + } +#endif + + if (!rinfo->head->is_target && !rinfo->head->is_dentry) { + dout("fill_trace reply is empty!\n"); + if (rinfo->head->result == 0 && req->r_locked_dir) { + struct ceph_inode_info *ci = + ceph_inode(req->r_locked_dir); + dout(" clearing %p complete (empty trace)\n", + req->r_locked_dir); + ci->i_ceph_flags &= ~CEPH_I_COMPLETE; + ci->i_release_count++; + } + return 0; + } + + if (rinfo->head->is_dentry) { + /* + * lookup link rename : null -> possibly existing inode + * mknod symlink mkdir : null -> new inode + * unlink : linked -> null + */ + struct inode *dir = req->r_locked_dir; + struct dentry *dn = req->r_dentry; + bool have_dir_cap, have_lease; + + BUG_ON(!dn); + BUG_ON(!dir); + BUG_ON(dn->d_parent->d_inode != dir); + BUG_ON(ceph_ino(dir) != + le64_to_cpu(rinfo->diri.in->ino)); + BUG_ON(ceph_snap(dir) != + le64_to_cpu(rinfo->diri.in->snapid)); + + err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag, + session, req->r_request_started, -1, + &req->r_caps_reservation); + if (err < 0) + return err; + + /* do we have a lease on the whole dir? */ + have_dir_cap = + (le32_to_cpu(rinfo->diri.in->cap.caps) & + CEPH_CAP_FILE_SHARED); + + /* do we have a dn lease? */ + have_lease = have_dir_cap || + (le16_to_cpu(rinfo->dlease->mask) & + CEPH_LOCK_DN); + + if (!have_lease) + dout("fill_trace no dentry lease or dir cap\n"); + + /* rename? 
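+ * If so, move the old (source) dentry into place with d_move() and
+ * take over the overwritten dentry's readdir offset.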
*/ + if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) { + dout(" src %p '%.*s' dst %p '%.*s'\n", + req->r_old_dentry, + req->r_old_dentry->d_name.len, + req->r_old_dentry->d_name.name, + dn, dn->d_name.len, dn->d_name.name); + dout("fill_trace doing d_move %p -> %p\n", + req->r_old_dentry, dn); + d_move(req->r_old_dentry, dn); + dout(" src %p '%.*s' dst %p '%.*s'\n", + req->r_old_dentry, + req->r_old_dentry->d_name.len, + req->r_old_dentry->d_name.name, + dn, dn->d_name.len, dn->d_name.name); + /* take overwritten dentry's readdir offset */ + ceph_dentry(req->r_old_dentry)->offset = + ceph_dentry(dn)->offset; + dn = req->r_old_dentry; /* use old_dentry */ + in = dn->d_inode; + } + + /* null dentry? */ + if (!rinfo->head->is_target) { + dout("fill_trace null dentry\n"); + if (dn->d_inode) { + dout("d_delete %p\n", dn); + d_delete(dn); + } else { + dout("d_instantiate %p NULL\n", dn); + d_instantiate(dn, NULL); + if (have_lease && d_unhashed(dn)) + d_rehash(dn); + update_dentry_lease(dn, rinfo->dlease, + session, + req->r_request_started); + } + goto done; + } + + /* attach proper inode */ + ininfo = rinfo->targeti.in; + vino.ino = le64_to_cpu(ininfo->ino); + vino.snap = le64_to_cpu(ininfo->snapid); + if (!dn->d_inode) { + in = ceph_get_inode(sb, vino); + if (IS_ERR(in)) { + pr_err("fill_trace bad get_inode " + "%llx.%llx\n", vino.ino, vino.snap); + err = PTR_ERR(in); + d_delete(dn); + goto done; + } + dn = splice_dentry(dn, in, &have_lease); + if (IS_ERR(dn)) { + err = PTR_ERR(dn); + goto done; + } + req->r_dentry = dn; /* may have spliced */ + igrab(in); + } else if (ceph_ino(in) == vino.ino && + ceph_snap(in) == vino.snap) { + igrab(in); + } else { + dout(" %p links to %p %llx.%llx, not %llx.%llx\n", + dn, in, ceph_ino(in), ceph_snap(in), + vino.ino, vino.snap); + have_lease = false; + in = NULL; + } + + if (have_lease) + update_dentry_lease(dn, rinfo->dlease, session, + req->r_request_started); + dout(" final dn %p\n", dn); + i++; + } else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP || + req->r_op == CEPH_MDS_OP_MKSNAP) { + struct dentry *dn = req->r_dentry; + + /* fill out a snapdir LOOKUPSNAP dentry */ + BUG_ON(!dn); + BUG_ON(!req->r_locked_dir); + BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR); + ininfo = rinfo->targeti.in; + vino.ino = le64_to_cpu(ininfo->ino); + vino.snap = le64_to_cpu(ininfo->snapid); + in = ceph_get_inode(sb, vino); + if (IS_ERR(in)) { + pr_err("fill_inode get_inode badness %llx.%llx\n", + vino.ino, vino.snap); + err = PTR_ERR(in); + d_delete(dn); + goto done; + } + dout(" linking snapped dir %p to dn %p\n", in, dn); + dn = splice_dentry(dn, in, NULL); + if (IS_ERR(dn)) { + err = PTR_ERR(dn); + goto done; + } + req->r_dentry = dn; /* may have spliced */ + igrab(in); + rinfo->head->is_dentry = 1; /* fool notrace handlers */ + } + + if (rinfo->head->is_target) { + vino.ino = le64_to_cpu(rinfo->targeti.in->ino); + vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); + + if (in == NULL || ceph_ino(in) != vino.ino || + ceph_snap(in) != vino.snap) { + in = ceph_get_inode(sb, vino); + if (IS_ERR(in)) { + err = PTR_ERR(in); + goto done; + } + } + req->r_target_inode = in; + + err = fill_inode(in, + &rinfo->targeti, NULL, + session, req->r_request_started, + (le32_to_cpu(rinfo->head->result) == 0) ? 
+ req->r_fmode : -1, + &req->r_caps_reservation); + if (err < 0) { + pr_err("fill_inode badness %p %llx.%llx\n", + in, ceph_vinop(in)); + goto done; + } + } + +done: + dout("fill_trace done err=%d\n", err); + return err; +} + +/* + * Prepopulate our cache with readdir results, leases, etc. + */ +int ceph_readdir_prepopulate(struct ceph_mds_request *req, + struct ceph_mds_session *session) +{ + struct dentry *parent = req->r_dentry; + struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; + struct qstr dname; + struct dentry *dn; + struct inode *in; + int err = 0, i; + struct inode *snapdir = NULL; + struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; + u64 frag = le32_to_cpu(rhead->args.readdir.frag); + struct ceph_dentry_info *di; + + if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) { + snapdir = ceph_get_snapdir(parent->d_inode); + parent = d_find_alias(snapdir); + dout("readdir_prepopulate %d items under SNAPDIR dn %p\n", + rinfo->dir_nr, parent); + } else { + dout("readdir_prepopulate %d items under dn %p\n", + rinfo->dir_nr, parent); + if (rinfo->dir_dir) + ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir); + } + + for (i = 0; i < rinfo->dir_nr; i++) { + struct ceph_vino vino; + + dname.name = rinfo->dir_dname[i]; + dname.len = rinfo->dir_dname_len[i]; + dname.hash = full_name_hash(dname.name, dname.len); + + vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino); + vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid); + +retry_lookup: + dn = d_lookup(parent, &dname); + dout("d_lookup on parent=%p name=%.*s got %p\n", + parent, dname.len, dname.name, dn); + + if (!dn) { + dn = d_alloc(parent, &dname); + dout("d_alloc %p '%.*s' = %p\n", parent, + dname.len, dname.name, dn); + if (dn == NULL) { + dout("d_alloc badness\n"); + err = -ENOMEM; + goto out; + } + err = ceph_init_dentry(dn); + if (err < 0) + goto out; + } else if (dn->d_inode && + (ceph_ino(dn->d_inode) != vino.ino || + ceph_snap(dn->d_inode) != vino.snap)) { + dout(" dn %p points to wrong inode %p\n", + dn, dn->d_inode); + d_delete(dn); + dput(dn); + goto retry_lookup; + } else { + /* reorder parent's d_subdirs */ + spin_lock(&dcache_lock); + spin_lock(&dn->d_lock); + list_move(&dn->d_u.d_child, &parent->d_subdirs); + spin_unlock(&dn->d_lock); + spin_unlock(&dcache_lock); + } + + di = dn->d_fsdata; + di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset); + + /* inode */ + if (dn->d_inode) { + in = dn->d_inode; + } else { + in = ceph_get_inode(parent->d_sb, vino); + if (in == NULL) { + dout("new_inode badness\n"); + d_delete(dn); + dput(dn); + err = -ENOMEM; + goto out; + } + dn = splice_dentry(dn, in, NULL); + } + + if (fill_inode(in, &rinfo->dir_in[i], NULL, session, + req->r_request_started, -1, + &req->r_caps_reservation) < 0) { + pr_err("fill_inode badness on %p\n", in); + dput(dn); + continue; + } + update_dentry_lease(dn, rinfo->dir_dlease[i], + req->r_session, req->r_request_started); + dput(dn); + } + req->r_did_prepopulate = true; + +out: + if (snapdir) { + iput(snapdir); + dput(parent); + } + dout("readdir_prepopulate done\n"); + return err; +} + +int ceph_inode_set_size(struct inode *inode, loff_t size) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + int ret = 0; + + spin_lock(&inode->i_lock); + dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size); + inode->i_size = size; + inode->i_blocks = (size + (1 << 9) - 1) >> 9; + + /* tell the MDS if we are approaching max_size */ + if ((size << 1) >= ci->i_max_size && + (ci->i_reported_size << 1) < ci->i_max_size) + ret = 
1; + + spin_unlock(&inode->i_lock); + return ret; +} + +/* + * Write back inode data in a worker thread. (This can't be done + * in the message handler context.) + */ +void ceph_inode_writeback(struct work_struct *work) +{ + struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, + i_wb_work); + struct inode *inode = &ci->vfs_inode; + + dout("writeback %p\n", inode); + filemap_fdatawrite(&inode->i_data); + iput(inode); +} + +/* + * Invalidate inode pages in a worker thread. (This can't be done + * in the message handler context.) + */ +static void ceph_inode_invalidate_pages(struct work_struct *work) +{ + struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, + i_pg_inv_work); + struct inode *inode = &ci->vfs_inode; + u32 orig_gen; + int check = 0; + + spin_lock(&inode->i_lock); + dout("invalidate_pages %p gen %d revoking %d\n", inode, + ci->i_rdcache_gen, ci->i_rdcache_revoking); + if (ci->i_rdcache_gen == 0 || + ci->i_rdcache_revoking != ci->i_rdcache_gen) { + BUG_ON(ci->i_rdcache_revoking > ci->i_rdcache_gen); + /* nevermind! */ + ci->i_rdcache_revoking = 0; + spin_unlock(&inode->i_lock); + goto out; + } + orig_gen = ci->i_rdcache_gen; + spin_unlock(&inode->i_lock); + + truncate_inode_pages(&inode->i_data, 0); + + spin_lock(&inode->i_lock); + if (orig_gen == ci->i_rdcache_gen) { + dout("invalidate_pages %p gen %d successful\n", inode, + ci->i_rdcache_gen); + ci->i_rdcache_gen = 0; + ci->i_rdcache_revoking = 0; + check = 1; + } else { + dout("invalidate_pages %p gen %d raced, gen now %d\n", + inode, orig_gen, ci->i_rdcache_gen); + } + spin_unlock(&inode->i_lock); + + if (check) + ceph_check_caps(ci, 0, NULL); +out: + iput(inode); +} + + +/* + * called by trunc_wq; take i_mutex ourselves + * + * We also truncate in a separate thread as well. + */ +void ceph_vmtruncate_work(struct work_struct *work) +{ + struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, + i_vmtruncate_work); + struct inode *inode = &ci->vfs_inode; + + dout("vmtruncate_work %p\n", inode); + mutex_lock(&inode->i_mutex); + __ceph_do_pending_vmtruncate(inode); + mutex_unlock(&inode->i_mutex); + iput(inode); +} + +/* + * called with i_mutex held. + * + * Make sure any pending truncation is applied before doing anything + * that may depend on it. + */ +void __ceph_do_pending_vmtruncate(struct inode *inode) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + u64 to; + int wrbuffer_refs, wake = 0; + +retry: + spin_lock(&inode->i_lock); + if (ci->i_truncate_pending == 0) { + dout("__do_pending_vmtruncate %p none pending\n", inode); + spin_unlock(&inode->i_lock); + return; + } + + /* + * make sure any dirty snapped pages are flushed before we + * possibly truncate them.. so write AND block! 
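+ * (We drop i_lock, write and wait, and then retry the whole check.)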
+ */ + if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) { + dout("__do_pending_vmtruncate %p flushing snaps first\n", + inode); + spin_unlock(&inode->i_lock); + filemap_write_and_wait_range(&inode->i_data, 0, + inode->i_sb->s_maxbytes); + goto retry; + } + + to = ci->i_truncate_size; + wrbuffer_refs = ci->i_wrbuffer_ref; + dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode, + ci->i_truncate_pending, to); + spin_unlock(&inode->i_lock); + + truncate_inode_pages(inode->i_mapping, to); + + spin_lock(&inode->i_lock); + ci->i_truncate_pending--; + if (ci->i_truncate_pending == 0) + wake = 1; + spin_unlock(&inode->i_lock); + + if (wrbuffer_refs == 0) + ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); + if (wake) + wake_up(&ci->i_cap_wq); +} + + +/* + * symlinks + */ +static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd) +{ + struct ceph_inode_info *ci = ceph_inode(dentry->d_inode); + nd_set_link(nd, ci->i_symlink); + return NULL; +} + +static const struct inode_operations ceph_symlink_iops = { + .readlink = generic_readlink, + .follow_link = ceph_sym_follow_link, +}; + +/* + * setattr + */ +int ceph_setattr(struct dentry *dentry, struct iattr *attr) +{ + struct inode *inode = dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + struct inode *parent_inode = dentry->d_parent->d_inode; + const unsigned int ia_valid = attr->ia_valid; + struct ceph_mds_request *req; + struct ceph_mds_client *mdsc = &ceph_client(dentry->d_sb)->mdsc; + int issued; + int release = 0, dirtied = 0; + int mask = 0; + int err = 0; + int queue_trunc = 0; + + if (ceph_snap(inode) != CEPH_NOSNAP) + return -EROFS; + + __ceph_do_pending_vmtruncate(inode); + + err = inode_change_ok(inode, attr); + if (err != 0) + return err; + + req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR, + USE_AUTH_MDS); + if (IS_ERR(req)) + return PTR_ERR(req); + + spin_lock(&inode->i_lock); + issued = __ceph_caps_issued(ci, NULL); + dout("setattr %p issued %s\n", inode, ceph_cap_string(issued)); + + if (ia_valid & ATTR_UID) { + dout("setattr %p uid %d -> %d\n", inode, + inode->i_uid, attr->ia_uid); + if (issued & CEPH_CAP_AUTH_EXCL) { + inode->i_uid = attr->ia_uid; + dirtied |= CEPH_CAP_AUTH_EXCL; + } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || + attr->ia_uid != inode->i_uid) { + req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid); + mask |= CEPH_SETATTR_UID; + release |= CEPH_CAP_AUTH_SHARED; + } + } + if (ia_valid & ATTR_GID) { + dout("setattr %p gid %d -> %d\n", inode, + inode->i_gid, attr->ia_gid); + if (issued & CEPH_CAP_AUTH_EXCL) { + inode->i_gid = attr->ia_gid; + dirtied |= CEPH_CAP_AUTH_EXCL; + } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || + attr->ia_gid != inode->i_gid) { + req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid); + mask |= CEPH_SETATTR_GID; + release |= CEPH_CAP_AUTH_SHARED; + } + } + if (ia_valid & ATTR_MODE) { + dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode, + attr->ia_mode); + if (issued & CEPH_CAP_AUTH_EXCL) { + inode->i_mode = attr->ia_mode; + dirtied |= CEPH_CAP_AUTH_EXCL; + } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || + attr->ia_mode != inode->i_mode) { + req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode); + mask |= CEPH_SETATTR_MODE; + release |= CEPH_CAP_AUTH_SHARED; + } + } + + if (ia_valid & ATTR_ATIME) { + dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode, + inode->i_atime.tv_sec, inode->i_atime.tv_nsec, + attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec); + if (issued & CEPH_CAP_FILE_EXCL) { + ci->i_time_warp_seq++; + inode->i_atime = 
attr->ia_atime; + dirtied |= CEPH_CAP_FILE_EXCL; + } else if ((issued & CEPH_CAP_FILE_WR) && + timespec_compare(&inode->i_atime, + &attr->ia_atime) < 0) { + inode->i_atime = attr->ia_atime; + dirtied |= CEPH_CAP_FILE_WR; + } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || + !timespec_equal(&inode->i_atime, &attr->ia_atime)) { + ceph_encode_timespec(&req->r_args.setattr.atime, + &attr->ia_atime); + mask |= CEPH_SETATTR_ATIME; + release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD | + CEPH_CAP_FILE_WR; + } + } + if (ia_valid & ATTR_MTIME) { + dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode, + inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec, + attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec); + if (issued & CEPH_CAP_FILE_EXCL) { + ci->i_time_warp_seq++; + inode->i_mtime = attr->ia_mtime; + dirtied |= CEPH_CAP_FILE_EXCL; + } else if ((issued & CEPH_CAP_FILE_WR) && + timespec_compare(&inode->i_mtime, + &attr->ia_mtime) < 0) { + inode->i_mtime = attr->ia_mtime; + dirtied |= CEPH_CAP_FILE_WR; + } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || + !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) { + ceph_encode_timespec(&req->r_args.setattr.mtime, + &attr->ia_mtime); + mask |= CEPH_SETATTR_MTIME; + release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD | + CEPH_CAP_FILE_WR; + } + } + if (ia_valid & ATTR_SIZE) { + dout("setattr %p size %lld -> %lld\n", inode, + inode->i_size, attr->ia_size); + if (attr->ia_size > inode->i_sb->s_maxbytes) { + err = -EINVAL; + goto out; + } + if ((issued & CEPH_CAP_FILE_EXCL) && + attr->ia_size > inode->i_size) { + inode->i_size = attr->ia_size; + if (attr->ia_size < inode->i_size) { + ci->i_truncate_size = attr->ia_size; + ci->i_truncate_pending++; + queue_trunc = 1; + } + inode->i_blocks = + (attr->ia_size + (1 << 9) - 1) >> 9; + inode->i_ctime = attr->ia_ctime; + ci->i_reported_size = attr->ia_size; + dirtied |= CEPH_CAP_FILE_EXCL; + } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || + attr->ia_size != inode->i_size) { + req->r_args.setattr.size = cpu_to_le64(attr->ia_size); + req->r_args.setattr.old_size = + cpu_to_le64(inode->i_size); + mask |= CEPH_SETATTR_SIZE; + release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD | + CEPH_CAP_FILE_WR; + } + } + + /* these do nothing */ + if (ia_valid & ATTR_CTIME) { + bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME| + ATTR_MODE|ATTR_UID|ATTR_GID)) == 0; + dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode, + inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, + attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec, + only ? "ctime only" : "ignored"); + inode->i_ctime = attr->ia_ctime; + if (only) { + /* + * if kernel wants to dirty ctime but nothing else, + * we need to choose a cap to dirty under, or do + * a almost-no-op setattr + */ + if (issued & CEPH_CAP_AUTH_EXCL) + dirtied |= CEPH_CAP_AUTH_EXCL; + else if (issued & CEPH_CAP_FILE_EXCL) + dirtied |= CEPH_CAP_FILE_EXCL; + else if (issued & CEPH_CAP_XATTR_EXCL) + dirtied |= CEPH_CAP_XATTR_EXCL; + else + mask |= CEPH_SETATTR_CTIME; + } + } + if (ia_valid & ATTR_FILE) + dout("setattr %p ATTR_FILE ... 
hrm!\n", inode); + + if (dirtied) { + __ceph_mark_dirty_caps(ci, dirtied); + inode->i_ctime = CURRENT_TIME; + } + + release &= issued; + spin_unlock(&inode->i_lock); + + if (queue_trunc) + __ceph_do_pending_vmtruncate(inode); + + if (mask) { + req->r_inode = igrab(inode); + req->r_inode_drop = release; + req->r_args.setattr.mask = cpu_to_le32(mask); + req->r_num_caps = 1; + err = ceph_mdsc_do_request(mdsc, parent_inode, req); + } + dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, + ceph_cap_string(dirtied), mask); + + ceph_mdsc_put_request(req); + __ceph_do_pending_vmtruncate(inode); + return err; +out: + spin_unlock(&inode->i_lock); + ceph_mdsc_put_request(req); + return err; +} + +/* + * Verify that we have a lease on the given mask. If not, + * do a getattr against an mds. + */ +int ceph_do_getattr(struct inode *inode, int mask) +{ + struct ceph_client *client = ceph_sb_to_client(inode->i_sb); + struct ceph_mds_client *mdsc = &client->mdsc; + struct ceph_mds_request *req; + int err; + + if (ceph_snap(inode) == CEPH_SNAPDIR) { + dout("do_getattr inode %p SNAPDIR\n", inode); + return 0; + } + + dout("do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask)); + if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1)) + return 0; + + req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); + if (IS_ERR(req)) + return PTR_ERR(req); + req->r_inode = igrab(inode); + req->r_num_caps = 1; + req->r_args.getattr.mask = cpu_to_le32(mask); + err = ceph_mdsc_do_request(mdsc, NULL, req); + ceph_mdsc_put_request(req); + dout("do_getattr result=%d\n", err); + return err; +} + + +/* + * Check inode permissions. We verify we have a valid value for + * the AUTH cap, then call the generic handler. + */ +int ceph_permission(struct inode *inode, int mask) +{ + int err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED); + + if (!err) + err = generic_permission(inode, mask, NULL); + return err; +} + +/* + * Get all attributes. Hopefully somedata we'll have a statlite() + * and can limit the fields we require to be accurate. + */ +int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat) +{ + struct inode *inode = dentry->d_inode; + int err; + + err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL); + if (!err) { + generic_fillattr(inode, stat); + stat->ino = inode->i_ino; + if (ceph_snap(inode) != CEPH_NOSNAP) + stat->dev = ceph_snap(inode); + else + stat->dev = 0; + if (S_ISDIR(inode->i_mode)) + stat->blksize = 65536; + } + return err; +} diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c new file mode 100644 index 000000000000..8eaac04d1b87 --- /dev/null +++ b/fs/ceph/xattr.c @@ -0,0 +1,833 @@ +#include "ceph_debug.h" +#include "super.h" +#include "decode.h" + +#include + +static bool ceph_is_valid_xattr(const char *name) +{ + return !strncmp(name, XATTR_SECURITY_PREFIX, + XATTR_SECURITY_PREFIX_LEN) || + !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || + !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); +} + +/* + * These define virtual xattrs exposing the recursive directory + * statistics and layout metadata. 
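+ * Each entry maps an xattr name to a callback that formats its value;
+ * readonly entries cannot be set or removed by the client.  For
+ * example, 'getfattr -n user.ceph.dir.rbytes <dir>' would read the
+ * recursive byte count of a directory.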
+ */ +struct ceph_vxattr_cb { + bool readonly; + char *name; + size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val, + size_t size); +}; + +/* directories */ + +static size_t ceph_vxattrcb_entries(struct ceph_inode_info *ci, char *val, + size_t size) +{ + return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs); +} + +static size_t ceph_vxattrcb_files(struct ceph_inode_info *ci, char *val, + size_t size) +{ + return snprintf(val, size, "%lld", ci->i_files); +} + +static size_t ceph_vxattrcb_subdirs(struct ceph_inode_info *ci, char *val, + size_t size) +{ + return snprintf(val, size, "%lld", ci->i_subdirs); +} + +static size_t ceph_vxattrcb_rentries(struct ceph_inode_info *ci, char *val, + size_t size) +{ + return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs); +} + +static size_t ceph_vxattrcb_rfiles(struct ceph_inode_info *ci, char *val, + size_t size) +{ + return snprintf(val, size, "%lld", ci->i_rfiles); +} + +static size_t ceph_vxattrcb_rsubdirs(struct ceph_inode_info *ci, char *val, + size_t size) +{ + return snprintf(val, size, "%lld", ci->i_rsubdirs); +} + +static size_t ceph_vxattrcb_rbytes(struct ceph_inode_info *ci, char *val, + size_t size) +{ + return snprintf(val, size, "%lld", ci->i_rbytes); +} + +static size_t ceph_vxattrcb_rctime(struct ceph_inode_info *ci, char *val, + size_t size) +{ + return snprintf(val, size, "%ld.%ld", (long)ci->i_rctime.tv_sec, + (long)ci->i_rctime.tv_nsec); +} + +static struct ceph_vxattr_cb ceph_dir_vxattrs[] = { + { true, "user.ceph.dir.entries", ceph_vxattrcb_entries}, + { true, "user.ceph.dir.files", ceph_vxattrcb_files}, + { true, "user.ceph.dir.subdirs", ceph_vxattrcb_subdirs}, + { true, "user.ceph.dir.rentries", ceph_vxattrcb_rentries}, + { true, "user.ceph.dir.rfiles", ceph_vxattrcb_rfiles}, + { true, "user.ceph.dir.rsubdirs", ceph_vxattrcb_rsubdirs}, + { true, "user.ceph.dir.rbytes", ceph_vxattrcb_rbytes}, + { true, "user.ceph.dir.rctime", ceph_vxattrcb_rctime}, + { true, NULL, NULL } +}; + +/* files */ + +static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val, + size_t size) +{ + return snprintf(val, size, + "chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n", + (unsigned long long)ceph_file_layout_su(ci->i_layout), + (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout), + (unsigned long long)ceph_file_layout_object_size(ci->i_layout)); +} + +static struct ceph_vxattr_cb ceph_file_vxattrs[] = { + { true, "user.ceph.layout", ceph_vxattrcb_layout}, + { NULL, NULL } +}; + +static struct ceph_vxattr_cb *ceph_inode_vxattrs(struct inode *inode) +{ + if (S_ISDIR(inode->i_mode)) + return ceph_dir_vxattrs; + else if (S_ISREG(inode->i_mode)) + return ceph_file_vxattrs; + return NULL; +} + +static struct ceph_vxattr_cb *ceph_match_vxattr(struct ceph_vxattr_cb *vxattr, + const char *name) +{ + do { + if (strcmp(vxattr->name, name) == 0) + return vxattr; + vxattr++; + } while (vxattr->name); + return NULL; +} + +static int __set_xattr(struct ceph_inode_info *ci, + const char *name, int name_len, + const char *val, int val_len, + int dirty, + int should_free_name, int should_free_val, + struct ceph_inode_xattr **newxattr) +{ + struct rb_node **p; + struct rb_node *parent = NULL; + struct ceph_inode_xattr *xattr = NULL; + int c; + int new = 0; + + p = &ci->i_xattrs.index.rb_node; + while (*p) { + parent = *p; + xattr = rb_entry(parent, struct ceph_inode_xattr, node); + c = strncmp(name, xattr->name, min(name_len, xattr->name_len)); + if (c < 0) + p = &(*p)->rb_left; + else if (c > 0) + p = 
&(*p)->rb_right; + else { + if (name_len == xattr->name_len) + break; + else if (name_len < xattr->name_len) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + xattr = NULL; + } + + if (!xattr) { + new = 1; + xattr = *newxattr; + xattr->name = name; + xattr->name_len = name_len; + xattr->should_free_name = should_free_name; + + ci->i_xattrs.count++; + dout("__set_xattr count=%d\n", ci->i_xattrs.count); + } else { + kfree(*newxattr); + *newxattr = NULL; + if (xattr->should_free_val) + kfree((void *)xattr->val); + + if (should_free_name) { + kfree((void *)name); + name = xattr->name; + } + ci->i_xattrs.names_size -= xattr->name_len; + ci->i_xattrs.vals_size -= xattr->val_len; + } + if (!xattr) { + pr_err("__set_xattr ENOMEM on %p %llx.%llx xattr %s=%s\n", + &ci->vfs_inode, ceph_vinop(&ci->vfs_inode), name, + xattr->val); + return -ENOMEM; + } + ci->i_xattrs.names_size += name_len; + ci->i_xattrs.vals_size += val_len; + if (val) + xattr->val = val; + else + xattr->val = ""; + + xattr->val_len = val_len; + xattr->dirty = dirty; + xattr->should_free_val = (val && should_free_val); + + if (new) { + rb_link_node(&xattr->node, parent, p); + rb_insert_color(&xattr->node, &ci->i_xattrs.index); + dout("__set_xattr_val p=%p\n", p); + } + + dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n", + ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val); + + return 0; +} + +static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci, + const char *name) +{ + struct rb_node **p; + struct rb_node *parent = NULL; + struct ceph_inode_xattr *xattr = NULL; + int c; + + p = &ci->i_xattrs.index.rb_node; + while (*p) { + parent = *p; + xattr = rb_entry(parent, struct ceph_inode_xattr, node); + c = strncmp(name, xattr->name, xattr->name_len); + if (c < 0) + p = &(*p)->rb_left; + else if (c > 0) + p = &(*p)->rb_right; + else { + dout("__get_xattr %s: found %.*s\n", name, + xattr->val_len, xattr->val); + return xattr; + } + } + + dout("__get_xattr %s: not found\n", name); + + return NULL; +} + +static void __free_xattr(struct ceph_inode_xattr *xattr) +{ + BUG_ON(!xattr); + + if (xattr->should_free_name) + kfree((void *)xattr->name); + if (xattr->should_free_val) + kfree((void *)xattr->val); + + kfree(xattr); +} + +static int __remove_xattr(struct ceph_inode_info *ci, + struct ceph_inode_xattr *xattr) +{ + if (!xattr) + return -EOPNOTSUPP; + + rb_erase(&xattr->node, &ci->i_xattrs.index); + + if (xattr->should_free_name) + kfree((void *)xattr->name); + if (xattr->should_free_val) + kfree((void *)xattr->val); + + ci->i_xattrs.names_size -= xattr->name_len; + ci->i_xattrs.vals_size -= xattr->val_len; + ci->i_xattrs.count--; + kfree(xattr); + + return 0; +} + +static int __remove_xattr_by_name(struct ceph_inode_info *ci, + const char *name) +{ + struct rb_node **p; + struct ceph_inode_xattr *xattr; + int err; + + p = &ci->i_xattrs.index.rb_node; + xattr = __get_xattr(ci, name); + err = __remove_xattr(ci, xattr); + return err; +} + +static char *__copy_xattr_names(struct ceph_inode_info *ci, + char *dest) +{ + struct rb_node *p; + struct ceph_inode_xattr *xattr = NULL; + + p = rb_first(&ci->i_xattrs.index); + dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count); + + while (p) { + xattr = rb_entry(p, struct ceph_inode_xattr, node); + memcpy(dest, xattr->name, xattr->name_len); + dest[xattr->name_len] = '\0'; + + dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name, + xattr->name_len, ci->i_xattrs.names_size); + + dest += xattr->name_len + 1; + p = rb_next(p); + } + + return dest; +} + +void 
__ceph_destroy_xattrs(struct ceph_inode_info *ci) +{ + struct rb_node *p, *tmp; + struct ceph_inode_xattr *xattr = NULL; + + p = rb_first(&ci->i_xattrs.index); + + dout("__ceph_destroy_xattrs p=%p\n", p); + + while (p) { + xattr = rb_entry(p, struct ceph_inode_xattr, node); + tmp = p; + p = rb_next(tmp); + dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p, + xattr->name_len, xattr->name); + rb_erase(tmp, &ci->i_xattrs.index); + + __free_xattr(xattr); + } + + ci->i_xattrs.names_size = 0; + ci->i_xattrs.vals_size = 0; + ci->i_xattrs.index_version = 0; + ci->i_xattrs.count = 0; + ci->i_xattrs.index = RB_ROOT; +} + +static int __build_xattrs(struct inode *inode) +{ + u32 namelen; + u32 numattr = 0; + void *p, *end; + u32 len; + const char *name, *val; + struct ceph_inode_info *ci = ceph_inode(inode); + int xattr_version; + struct ceph_inode_xattr **xattrs = NULL; + int err; + int i; + + dout("__build_xattrs() len=%d\n", + ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0); + + if (ci->i_xattrs.index_version >= ci->i_xattrs.version) + return 0; /* already built */ + + __ceph_destroy_xattrs(ci); + +start: + /* updated internal xattr rb tree */ + if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) { + p = ci->i_xattrs.blob->vec.iov_base; + end = p + ci->i_xattrs.blob->vec.iov_len; + ceph_decode_32_safe(&p, end, numattr, bad); + xattr_version = ci->i_xattrs.version; + spin_unlock(&inode->i_lock); + + xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *), + GFP_NOFS); + err = -ENOMEM; + if (!xattrs) + goto bad_lock; + memset(xattrs, 0, numattr*sizeof(struct ceph_xattr *)); + for (i = 0; i < numattr; i++) { + xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr), + GFP_NOFS); + if (!xattrs[i]) + goto bad_lock; + } + + spin_lock(&inode->i_lock); + if (ci->i_xattrs.version != xattr_version) { + /* lost a race, retry */ + for (i = 0; i < numattr; i++) + kfree(xattrs[i]); + kfree(xattrs); + goto start; + } + err = -EIO; + while (numattr--) { + ceph_decode_32_safe(&p, end, len, bad); + namelen = len; + name = p; + p += len; + ceph_decode_32_safe(&p, end, len, bad); + val = p; + p += len; + + err = __set_xattr(ci, name, namelen, val, len, + 0, 0, 0, &xattrs[numattr]); + + if (err < 0) + goto bad; + } + kfree(xattrs); + } + ci->i_xattrs.index_version = ci->i_xattrs.version; + ci->i_xattrs.dirty = false; + + return err; +bad_lock: + spin_lock(&inode->i_lock); +bad: + if (xattrs) { + for (i = 0; i < numattr; i++) + kfree(xattrs[i]); + kfree(xattrs); + } + ci->i_xattrs.names_size = 0; + return err; +} + +static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size, + int val_size) +{ + /* + * 4 bytes for the length, and additional 4 bytes per each xattr name, + * 4 bytes per each value + */ + int size = 4 + ci->i_xattrs.count*(4 + 4) + + ci->i_xattrs.names_size + + ci->i_xattrs.vals_size; + dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n", + ci->i_xattrs.count, ci->i_xattrs.names_size, + ci->i_xattrs.vals_size); + + if (name_size) + size += 4 + 4 + name_size + val_size; + + return size; +} + +/* + * If there are dirty xattrs, reencode xattrs into the prealloc_blob + * and swap into place. 
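+ * The caller must have preallocated a blob of at least
+ * __get_required_blob_size() bytes; this is asserted below.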
+ */ +void __ceph_build_xattrs_blob(struct ceph_inode_info *ci) +{ + struct rb_node *p; + struct ceph_inode_xattr *xattr = NULL; + void *dest; + + dout("__build_xattrs_blob %p\n", &ci->vfs_inode); + if (ci->i_xattrs.dirty) { + int need = __get_required_blob_size(ci, 0, 0); + + BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len); + + p = rb_first(&ci->i_xattrs.index); + dest = ci->i_xattrs.prealloc_blob->vec.iov_base; + + ceph_encode_32(&dest, ci->i_xattrs.count); + while (p) { + xattr = rb_entry(p, struct ceph_inode_xattr, node); + + ceph_encode_32(&dest, xattr->name_len); + memcpy(dest, xattr->name, xattr->name_len); + dest += xattr->name_len; + ceph_encode_32(&dest, xattr->val_len); + memcpy(dest, xattr->val, xattr->val_len); + dest += xattr->val_len; + + p = rb_next(p); + } + + /* adjust buffer len; it may be larger than we need */ + ci->i_xattrs.prealloc_blob->vec.iov_len = + dest - ci->i_xattrs.prealloc_blob->vec.iov_base; + + ceph_buffer_put(ci->i_xattrs.blob); + ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob; + ci->i_xattrs.prealloc_blob = NULL; + ci->i_xattrs.dirty = false; + } +} + +ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value, + size_t size) +{ + struct inode *inode = dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode); + int err; + struct ceph_inode_xattr *xattr; + struct ceph_vxattr_cb *vxattr = NULL; + + if (!ceph_is_valid_xattr(name)) + return -ENODATA; + + /* let's see if a virtual xattr was requested */ + if (vxattrs) + vxattr = ceph_match_vxattr(vxattrs, name); + + spin_lock(&inode->i_lock); + dout("getxattr %p ver=%lld index_ver=%lld\n", inode, + ci->i_xattrs.version, ci->i_xattrs.index_version); + + if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) && + (ci->i_xattrs.index_version >= ci->i_xattrs.version)) { + goto get_xattr; + } else { + spin_unlock(&inode->i_lock); + /* get xattrs from mds (if we don't already have them) */ + err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR); + if (err) + return err; + } + + spin_lock(&inode->i_lock); + + if (vxattr && vxattr->readonly) { + err = vxattr->getxattr_cb(ci, value, size); + goto out; + } + + err = __build_xattrs(inode); + if (err < 0) + goto out; + +get_xattr: + err = -ENODATA; /* == ENOATTR */ + xattr = __get_xattr(ci, name); + if (!xattr) { + if (vxattr) + err = vxattr->getxattr_cb(ci, value, size); + goto out; + } + + err = -ERANGE; + if (size && size < xattr->val_len) + goto out; + + err = xattr->val_len; + if (size == 0) + goto out; + + memcpy(value, xattr->val, xattr->val_len); + +out: + spin_unlock(&inode->i_lock); + return err; +} + +ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size) +{ + struct inode *inode = dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode); + u32 vir_namelen = 0; + u32 namelen; + int err; + u32 len; + int i; + + spin_lock(&inode->i_lock); + dout("listxattr %p ver=%lld index_ver=%lld\n", inode, + ci->i_xattrs.version, ci->i_xattrs.index_version); + + if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) && + (ci->i_xattrs.index_version > ci->i_xattrs.version)) { + goto list_xattr; + } else { + spin_unlock(&inode->i_lock); + err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR); + if (err) + return err; + } + + spin_lock(&inode->i_lock); + + err = __build_xattrs(inode); + if (err < 0) + goto out; + +list_xattr: + vir_namelen = 0; + /* include virtual dir xattrs */ + if (vxattrs) + for (i 
= 0; vxattrs[i].name; i++) + vir_namelen += strlen(vxattrs[i].name) + 1; + /* adding 1 byte per each variable due to the null termination */ + namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count; + err = -ERANGE; + if (size && namelen > size) + goto out; + + err = namelen; + if (size == 0) + goto out; + + names = __copy_xattr_names(ci, names); + + /* virtual xattr names, too */ + if (vxattrs) + for (i = 0; vxattrs[i].name; i++) { + len = sprintf(names, "%s", vxattrs[i].name); + names += len + 1; + } + +out: + spin_unlock(&inode->i_lock); + return err; +} + +static int ceph_sync_setxattr(struct dentry *dentry, const char *name, + const char *value, size_t size, int flags) +{ + struct ceph_client *client = ceph_client(dentry->d_sb); + struct inode *inode = dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + struct inode *parent_inode = dentry->d_parent->d_inode; + struct ceph_mds_request *req; + struct ceph_mds_client *mdsc = &client->mdsc; + int err; + int i, nr_pages; + struct page **pages = NULL; + void *kaddr; + + /* copy value into some pages */ + nr_pages = calc_pages_for(0, size); + if (nr_pages) { + pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS); + if (!pages) + return -ENOMEM; + err = -ENOMEM; + for (i = 0; i < nr_pages; i++) { + pages[i] = alloc_page(GFP_NOFS); + if (!pages[i]) { + nr_pages = i; + goto out; + } + kaddr = kmap(pages[i]); + memcpy(kaddr, value + i*PAGE_CACHE_SIZE, + min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE)); + } + } + + dout("setxattr value=%.*s\n", (int)size, value); + + /* do request */ + req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR, + USE_AUTH_MDS); + if (IS_ERR(req)) + return PTR_ERR(req); + req->r_inode = igrab(inode); + req->r_inode_drop = CEPH_CAP_XATTR_SHARED; + req->r_num_caps = 1; + req->r_args.setxattr.flags = cpu_to_le32(flags); + req->r_path2 = kstrdup(name, GFP_NOFS); + + req->r_pages = pages; + req->r_num_pages = nr_pages; + req->r_data_len = size; + + dout("xattr.ver (before): %lld\n", ci->i_xattrs.version); + err = ceph_mdsc_do_request(mdsc, parent_inode, req); + ceph_mdsc_put_request(req); + dout("xattr.ver (after): %lld\n", ci->i_xattrs.version); + +out: + if (pages) { + for (i = 0; i < nr_pages; i++) + __free_page(pages[i]); + kfree(pages); + } + return err; +} + +int ceph_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags) +{ + struct inode *inode = dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode); + int err; + int name_len = strlen(name); + int val_len = size; + char *newname = NULL; + char *newval = NULL; + struct ceph_inode_xattr *xattr = NULL; + int issued; + int required_blob_size; + + if (ceph_snap(inode) != CEPH_NOSNAP) + return -EROFS; + + if (!ceph_is_valid_xattr(name)) + return -EOPNOTSUPP; + + if (vxattrs) { + struct ceph_vxattr_cb *vxattr = + ceph_match_vxattr(vxattrs, name); + if (vxattr && vxattr->readonly) + return -EOPNOTSUPP; + } + + /* preallocate memory for xattr name, value, index node */ + err = -ENOMEM; + newname = kmalloc(name_len + 1, GFP_NOFS); + if (!newname) + goto out; + memcpy(newname, name, name_len + 1); + + if (val_len) { + newval = kmalloc(val_len + 1, GFP_NOFS); + if (!newval) + goto out; + memcpy(newval, value, val_len); + newval[val_len] = '\0'; + } + + xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS); + if (!xattr) + goto out; + + spin_lock(&inode->i_lock); +retry: + issued = __ceph_caps_issued(ci, NULL); + if (!(issued & 
CEPH_CAP_XATTR_EXCL)) + goto do_sync; + __build_xattrs(inode); + + required_blob_size = __get_required_blob_size(ci, name_len, val_len); + + if (!ci->i_xattrs.prealloc_blob || + required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) { + struct ceph_buffer *blob = NULL; + + spin_unlock(&inode->i_lock); + dout(" preaallocating new blob size=%d\n", required_blob_size); + blob = ceph_buffer_new_alloc(required_blob_size, GFP_NOFS); + if (!blob) + goto out; + spin_lock(&inode->i_lock); + ceph_buffer_put(ci->i_xattrs.prealloc_blob); + ci->i_xattrs.prealloc_blob = blob; + goto retry; + } + + dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued)); + err = __set_xattr(ci, newname, name_len, newval, + val_len, 1, 1, 1, &xattr); + __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); + ci->i_xattrs.dirty = true; + inode->i_ctime = CURRENT_TIME; + spin_unlock(&inode->i_lock); + + return err; + +do_sync: + spin_unlock(&inode->i_lock); + err = ceph_sync_setxattr(dentry, name, value, size, flags); +out: + kfree(newname); + kfree(newval); + kfree(xattr); + return err; +} + +static int ceph_send_removexattr(struct dentry *dentry, const char *name) +{ + struct ceph_client *client = ceph_client(dentry->d_sb); + struct ceph_mds_client *mdsc = &client->mdsc; + struct inode *inode = dentry->d_inode; + struct inode *parent_inode = dentry->d_parent->d_inode; + struct ceph_mds_request *req; + int err; + + req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR, + USE_AUTH_MDS); + if (IS_ERR(req)) + return PTR_ERR(req); + req->r_inode = igrab(inode); + req->r_inode_drop = CEPH_CAP_XATTR_SHARED; + req->r_num_caps = 1; + req->r_path2 = kstrdup(name, GFP_NOFS); + + err = ceph_mdsc_do_request(mdsc, parent_inode, req); + ceph_mdsc_put_request(req); + return err; +} + +int ceph_removexattr(struct dentry *dentry, const char *name) +{ + struct inode *inode = dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode); + int issued; + int err; + + if (ceph_snap(inode) != CEPH_NOSNAP) + return -EROFS; + + if (!ceph_is_valid_xattr(name)) + return -EOPNOTSUPP; + + if (vxattrs) { + struct ceph_vxattr_cb *vxattr = + ceph_match_vxattr(vxattrs, name); + if (vxattr && vxattr->readonly) + return -EOPNOTSUPP; + } + + spin_lock(&inode->i_lock); + __build_xattrs(inode); + issued = __ceph_caps_issued(ci, NULL); + dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued)); + + if (!(issued & CEPH_CAP_XATTR_EXCL)) + goto do_sync; + + err = __remove_xattr_by_name(ceph_inode(inode), name); + __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); + ci->i_xattrs.dirty = true; + inode->i_ctime = CURRENT_TIME; + + spin_unlock(&inode->i_lock); + + return err; +do_sync: + spin_unlock(&inode->i_lock); + err = ceph_send_removexattr(dentry, name); + return err; +} + -- cgit v1.2.3 From 2817b000b02c5f0c05af67c01fb2684e1381d6ef Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:08 -0700 Subject: ceph: directory operations Directory operations, including lookup, are defined here. We take advantage of lookup intents when possible. For the most part, we just need to build the proper requests for the metadata server(s) and pass things off to the mds_client. The results of most operations are normally incorporated into the client's cache when the reply is parsed by ceph_fill_trace(). However, if the MDS replies without a trace (e.g., when retrying an update after an MDS failure recovery), some operation-specific cleanup may be needed. 
We can validate cached dentries in two ways. A per-dentry lease may be issued by the MDS, or a per-directory cap may be issued that acts as a lease on the entire directory. In the latter case, a 'gen' value is used to determine which dentries belong to the currently leased directory contents. We normally prepopulate the dcache and icache with readdir results. This makes subsequent lookups and getattrs avoid any server interaction. It also lets us satisfy readdir operation by peeking at the dcache IFF we hold the per-directory cap/lease, previously performed a readdir, and haven't dropped any of the resulting dentries. Signed-off-by: Sage Weil --- fs/ceph/dir.c | 1212 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1212 insertions(+) create mode 100644 fs/ceph/dir.c diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c new file mode 100644 index 000000000000..7bb8db524e58 --- /dev/null +++ b/fs/ceph/dir.c @@ -0,0 +1,1212 @@ +#include "ceph_debug.h" + +#include +#include +#include +#include + +#include "super.h" + +/* + * Directory operations: readdir, lookup, create, link, unlink, + * rename, etc. + */ + +/* + * Ceph MDS operations are specified in terms of a base ino and + * relative path. Thus, the client can specify an operation on a + * specific inode (e.g., a getattr due to fstat(2)), or as a path + * relative to, say, the root directory. + * + * Normally, we limit ourselves to strict inode ops (no path component) + * or dentry operations (a single path component relative to an ino). The + * exception to this is open_root_dentry(), which will open the mount + * point by name. + */ + +const struct inode_operations ceph_dir_iops; +const struct file_operations ceph_dir_fops; +struct dentry_operations ceph_dentry_ops; + +/* + * Initialize ceph dentry state. + */ +int ceph_init_dentry(struct dentry *dentry) +{ + struct ceph_dentry_info *di; + + if (dentry->d_fsdata) + return 0; + + if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) + dentry->d_op = &ceph_dentry_ops; + else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR) + dentry->d_op = &ceph_snapdir_dentry_ops; + else + dentry->d_op = &ceph_snap_dentry_ops; + + di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS); + if (!di) + return -ENOMEM; /* oh well */ + + spin_lock(&dentry->d_lock); + if (dentry->d_fsdata) /* lost a race */ + goto out_unlock; + di->dentry = dentry; + di->lease_session = NULL; + dentry->d_fsdata = di; + dentry->d_time = jiffies; + ceph_dentry_lru_add(dentry); +out_unlock: + spin_unlock(&dentry->d_lock); + return 0; +} + + + +/* + * for readdir, we encode the directory frag and offset within that + * frag into f_pos. + */ +static unsigned fpos_frag(loff_t p) +{ + return p >> 32; +} +static unsigned fpos_off(loff_t p) +{ + return p & 0xffffffff; +} + +/* + * When possible, we try to satisfy a readdir by peeking at the + * dcache. We make this work by carefully ordering dentries on + * d_u.d_child when we initially get results back from the MDS, and + * falling back to a "normal" sync readdir if any dentries in the dir + * are dropped. + * + * I_COMPLETE tells indicates we have all dentries in the dir. It is + * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by + * the MDS if/when the directory is modified). 
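+ *
+ * This path is only attempted from ceph_readdir() when the directory
+ * still looks complete, roughly when
+ *
+ *	(ci->i_ceph_flags & CEPH_I_COMPLETE) &&
+ *	__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)
+ *
+ * holds. If a dentry turns out to have been dropped mid-scan we
+ * return -EAGAIN and the caller falls back to a normal MDS readdir.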
+ */ +static int __dcache_readdir(struct file *filp, + void *dirent, filldir_t filldir) +{ + struct inode *inode = filp->f_dentry->d_inode; + struct ceph_file_info *fi = filp->private_data; + struct dentry *parent = filp->f_dentry; + struct inode *dir = parent->d_inode; + struct list_head *p; + struct dentry *dentry, *last; + struct ceph_dentry_info *di; + int err = 0; + + /* claim ref on last dentry we returned */ + last = fi->dentry; + fi->dentry = NULL; + + dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos, + last); + + spin_lock(&dcache_lock); + + /* start at beginning? */ + if (filp->f_pos == 2 || (last && + filp->f_pos < ceph_dentry(last)->offset)) { + if (list_empty(&parent->d_subdirs)) + goto out_unlock; + p = parent->d_subdirs.prev; + dout(" initial p %p/%p\n", p->prev, p->next); + } else { + p = last->d_u.d_child.prev; + } + +more: + dentry = list_entry(p, struct dentry, d_u.d_child); + di = ceph_dentry(dentry); + while (1) { + dout(" p %p/%p d_subdirs %p/%p\n", p->prev, p->next, + parent->d_subdirs.prev, parent->d_subdirs.next); + if (p == &parent->d_subdirs) { + fi->at_end = 1; + goto out_unlock; + } + if (!d_unhashed(dentry) && dentry->d_inode && + filp->f_pos <= di->offset) + break; + dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry, + dentry->d_name.len, dentry->d_name.name, di->offset, + filp->f_pos, d_unhashed(dentry) ? " unhashed" : "", + !dentry->d_inode ? " null" : ""); + p = p->prev; + dentry = list_entry(p, struct dentry, d_u.d_child); + di = ceph_dentry(dentry); + } + + atomic_inc(&dentry->d_count); + spin_unlock(&dcache_lock); + spin_unlock(&inode->i_lock); + + dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos, + dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); + filp->f_pos = di->offset; + err = filldir(dirent, dentry->d_name.name, + dentry->d_name.len, di->offset, + dentry->d_inode->i_ino, + dentry->d_inode->i_mode >> 12); + + if (last) { + if (err < 0) { + /* remember our position */ + fi->dentry = last; + fi->next_offset = di->offset; + } else { + dput(last); + } + last = NULL; + } + + spin_lock(&inode->i_lock); + spin_lock(&dcache_lock); + + if (err < 0) + goto out_unlock; + + last = dentry; + + p = p->prev; + filp->f_pos++; + + /* make sure a dentry wasn't dropped while we didn't have dcache_lock */ + if ((ceph_inode(dir)->i_ceph_flags & CEPH_I_COMPLETE)) + goto more; + dout(" lost I_COMPLETE on %p; falling back to mds\n", dir); + err = -EAGAIN; + +out_unlock: + spin_unlock(&dcache_lock); + + if (last) { + spin_unlock(&inode->i_lock); + dput(last); + spin_lock(&inode->i_lock); + } + + return err; +} + +/* + * make note of the last dentry we read, so we can + * continue at the same lexicographical point, + * regardless of what dir changes take place on the + * server. 
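+ *
+ * The saved name is handed back to the MDS as req->r_path2 on the
+ * next readdir request, so the listing resumes after the last entry
+ * we already returned.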
+ */ +static int note_last_dentry(struct ceph_file_info *fi, const char *name, + int len) +{ + kfree(fi->last_name); + fi->last_name = kmalloc(len+1, GFP_NOFS); + if (!fi->last_name) + return -ENOMEM; + memcpy(fi->last_name, name, len); + fi->last_name[len] = 0; + dout("note_last_dentry '%s'\n", fi->last_name); + return 0; +} + +static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir) +{ + struct ceph_file_info *fi = filp->private_data; + struct inode *inode = filp->f_dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_client *client = ceph_inode_to_client(inode); + struct ceph_mds_client *mdsc = &client->mdsc; + unsigned frag = fpos_frag(filp->f_pos); + int off = fpos_off(filp->f_pos); + int err; + u32 ftype; + struct ceph_mds_reply_info_parsed *rinfo; + const int max_entries = client->mount_args.max_readdir; + + dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off); + if (fi->at_end) + return 0; + + /* always start with . and .. */ + if (filp->f_pos == 0) { + /* note dir version at start of readdir so we can tell + * if any dentries get dropped */ + fi->dir_release_count = ci->i_release_count; + + dout("readdir off 0 -> '.'\n"); + if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0), + inode->i_ino, inode->i_mode >> 12) < 0) + return 0; + filp->f_pos = 1; + off = 1; + } + if (filp->f_pos == 1) { + dout("readdir off 1 -> '..'\n"); + if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1), + filp->f_dentry->d_parent->d_inode->i_ino, + inode->i_mode >> 12) < 0) + return 0; + filp->f_pos = 2; + off = 2; + } + + /* can we use the dcache? */ + spin_lock(&inode->i_lock); + if ((filp->f_pos == 2 || fi->dentry) && + !ceph_test_opt(client, NOASYNCREADDIR) && + (ci->i_ceph_flags & CEPH_I_COMPLETE) && + __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) { + err = __dcache_readdir(filp, dirent, filldir); + if (err != -EAGAIN) { + spin_unlock(&inode->i_lock); + return err; + } + } + spin_unlock(&inode->i_lock); + if (fi->dentry) { + err = note_last_dentry(fi, fi->dentry->d_name.name, + fi->dentry->d_name.len); + if (err) + return err; + dput(fi->dentry); + fi->dentry = NULL; + } + + /* proceed with a normal readdir */ + +more: + /* do we have the correct frag content buffered? */ + if (fi->frag != frag || fi->last_readdir == NULL) { + struct ceph_mds_request *req; + int op = ceph_snap(inode) == CEPH_SNAPDIR ? 
+ CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR; + + /* discard old result, if any */ + if (fi->last_readdir) + ceph_mdsc_put_request(fi->last_readdir); + + /* requery frag tree, as the frag topology may have changed */ + frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL); + + dout("readdir fetching %llx.%llx frag %x offset '%s'\n", + ceph_vinop(inode), frag, fi->last_name); + req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); + if (IS_ERR(req)) + return PTR_ERR(req); + req->r_inode = igrab(inode); + req->r_dentry = dget(filp->f_dentry); + /* hints to request -> mds selection code */ + req->r_direct_mode = USE_AUTH_MDS; + req->r_direct_hash = ceph_frag_value(frag); + req->r_direct_is_hash = true; + req->r_path2 = kstrdup(fi->last_name, GFP_NOFS); + req->r_readdir_offset = fi->next_offset; + req->r_args.readdir.frag = cpu_to_le32(frag); + req->r_args.readdir.max_entries = cpu_to_le32(max_entries); + req->r_num_caps = max_entries; + err = ceph_mdsc_do_request(mdsc, NULL, req); + if (err < 0) { + ceph_mdsc_put_request(req); + return err; + } + dout("readdir got and parsed readdir result=%d" + " on frag %x, end=%d, complete=%d\n", err, frag, + (int)req->r_reply_info.dir_end, + (int)req->r_reply_info.dir_complete); + + if (!req->r_did_prepopulate) { + dout("readdir !did_prepopulate"); + fi->dir_release_count--; /* preclude I_COMPLETE */ + } + + /* note next offset and last dentry name */ + fi->offset = fi->next_offset; + fi->last_readdir = req; + + if (req->r_reply_info.dir_end) { + kfree(fi->last_name); + fi->last_name = NULL; + fi->next_offset = 0; + } else { + rinfo = &req->r_reply_info; + err = note_last_dentry(fi, + rinfo->dir_dname[rinfo->dir_nr-1], + rinfo->dir_dname_len[rinfo->dir_nr-1]); + if (err) + return err; + fi->next_offset += rinfo->dir_nr; + } + } + + rinfo = &fi->last_readdir->r_reply_info; + dout("readdir frag %x num %d off %d chunkoff %d\n", frag, + rinfo->dir_nr, off, fi->offset); + while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) { + u64 pos = ceph_make_fpos(frag, off); + struct ceph_mds_reply_inode *in = + rinfo->dir_in[off - fi->offset].in; + dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n", + off, off - fi->offset, rinfo->dir_nr, pos, + rinfo->dir_dname_len[off - fi->offset], + rinfo->dir_dname[off - fi->offset], in); + BUG_ON(!in); + ftype = le32_to_cpu(in->mode) >> 12; + if (filldir(dirent, + rinfo->dir_dname[off - fi->offset], + rinfo->dir_dname_len[off - fi->offset], + pos, + le64_to_cpu(in->ino), + ftype) < 0) { + dout("filldir stopping us...\n"); + return 0; + } + off++; + filp->f_pos = pos + 1; + } + + if (fi->last_name) { + ceph_mdsc_put_request(fi->last_readdir); + fi->last_readdir = NULL; + goto more; + } + + /* more frags? */ + if (!ceph_frag_is_rightmost(frag)) { + frag = ceph_frag_next(frag); + off = 0; + filp->f_pos = ceph_make_fpos(frag, off); + dout("readdir next frag is %x\n", frag); + goto more; + } + fi->at_end = 1; + + /* + * if dir_release_count still matches the dir, no dentries + * were released during the whole readdir, and we should have + * the complete dir contents in our cache. 
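+	 * (i_release_count is bumped when a dentry from the current dir
+	 * generation is released -- see ceph_dentry_release() -- so
+	 * equality here means nothing was dropped since we sampled the
+	 * count at f_pos == 0.)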
+ */ + spin_lock(&inode->i_lock); + if (ci->i_release_count == fi->dir_release_count) { + dout(" marking %p complete\n", inode); + ci->i_ceph_flags |= CEPH_I_COMPLETE; + ci->i_max_offset = filp->f_pos; + } + spin_unlock(&inode->i_lock); + + dout("readdir %p filp %p done.\n", inode, filp); + return 0; +} + +static void reset_readdir(struct ceph_file_info *fi) +{ + if (fi->last_readdir) { + ceph_mdsc_put_request(fi->last_readdir); + fi->last_readdir = NULL; + } + kfree(fi->last_name); + fi->next_offset = 2; /* compensate for . and .. */ + if (fi->dentry) { + dput(fi->dentry); + fi->dentry = NULL; + } + fi->at_end = 0; +} + +static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin) +{ + struct ceph_file_info *fi = file->private_data; + struct inode *inode = file->f_mapping->host; + loff_t old_offset = offset; + loff_t retval; + + mutex_lock(&inode->i_mutex); + switch (origin) { + case SEEK_END: + offset += inode->i_size + 2; /* FIXME */ + break; + case SEEK_CUR: + offset += file->f_pos; + } + retval = -EINVAL; + if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) { + if (offset != file->f_pos) { + file->f_pos = offset; + file->f_version = 0; + fi->at_end = 0; + } + retval = offset; + + /* + * discard buffered readdir content on seekdir(0), or + * seek to new frag, or seek prior to current chunk. + */ + if (offset == 0 || + fpos_frag(offset) != fpos_frag(old_offset) || + fpos_off(offset) < fi->offset) { + dout("dir_llseek dropping %p content\n", file); + reset_readdir(fi); + } + + /* bump dir_release_count if we did a forward seek */ + if (offset > old_offset) + fi->dir_release_count--; + } + mutex_unlock(&inode->i_mutex); + return retval; +} + +/* + * Process result of a lookup/open request. + * + * Mainly, make sure we return the final req->r_dentry (if it already + * existed) in place of the original VFS-provided dentry when they + * differ. + * + * Gracefully handle the case where the MDS replies with -ENOENT and + * no trace (which it may do, at its discretion, e.g., if it doesn't + * care to issue a lease on the negative dentry). + */ +struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, + struct dentry *dentry, int err) +{ + struct ceph_client *client = ceph_client(dentry->d_sb); + struct inode *parent = dentry->d_parent->d_inode; + + /* .snap dir? */ + if (err == -ENOENT && + ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */ + strcmp(dentry->d_name.name, client->mount_args.snapdir_name) == 0) { + struct inode *inode = ceph_get_snapdir(parent); + dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n", + dentry, dentry->d_name.len, dentry->d_name.name, inode); + d_add(dentry, inode); + err = 0; + } + + if (err == -ENOENT) { + /* no trace? */ + err = 0; + if (!req->r_reply_info.head->is_dentry) { + dout("ENOENT and no trace, dentry %p inode %p\n", + dentry, dentry->d_inode); + if (dentry->d_inode) { + d_drop(dentry); + err = -ENOENT; + } else { + d_add(dentry, NULL); + } + } + } + if (err) + dentry = ERR_PTR(err); + else if (dentry != req->r_dentry) + dentry = dget(req->r_dentry); /* we got spliced */ + else + dentry = NULL; + return dentry; +} + +/* + * Look up a single dir entry. If there is a lookup intent, inform + * the MDS so that it gets our 'caps wanted' value in a single op. 
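+ *
+ * Two shortcuts are taken below: an open (but not create) intent is
+ * handed off to ceph_lookup_open() so the MDS sees our wanted caps in
+ * the same op, and a negative lookup may be answered locally with
+ * d_add(dentry, NULL) when the dir is I_COMPLETE and we hold
+ * CEPH_CAP_FILE_SHARED.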
+ */ +static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, + struct nameidata *nd) +{ + struct ceph_client *client = ceph_sb_to_client(dir->i_sb); + struct ceph_mds_client *mdsc = &client->mdsc; + struct ceph_mds_request *req; + int op; + int err; + + dout("lookup %p dentry %p '%.*s'\n", + dir, dentry, dentry->d_name.len, dentry->d_name.name); + + if (dentry->d_name.len > NAME_MAX) + return ERR_PTR(-ENAMETOOLONG); + + err = ceph_init_dentry(dentry); + if (err < 0) + return ERR_PTR(err); + + /* open (but not create!) intent? */ + if (nd && + (nd->flags & LOOKUP_OPEN) && + (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */ + !(nd->intent.open.flags & O_CREAT)) { + int mode = nd->intent.open.create_mode & ~current->fs->umask; + return ceph_lookup_open(dir, dentry, nd, mode, 1); + } + + /* can we conclude ENOENT locally? */ + if (dentry->d_inode == NULL) { + struct ceph_inode_info *ci = ceph_inode(dir); + struct ceph_dentry_info *di = ceph_dentry(dentry); + + spin_lock(&dir->i_lock); + dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags); + if (strncmp(dentry->d_name.name, + client->mount_args.snapdir_name, + dentry->d_name.len) && + (ci->i_ceph_flags & CEPH_I_COMPLETE) && + (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) { + di->offset = ci->i_max_offset++; + spin_unlock(&dir->i_lock); + dout(" dir %p complete, -ENOENT\n", dir); + d_add(dentry, NULL); + di->lease_shared_gen = ci->i_shared_gen; + return NULL; + } + spin_unlock(&dir->i_lock); + } + + op = ceph_snap(dir) == CEPH_SNAPDIR ? + CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP; + req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS); + if (IS_ERR(req)) + return ERR_PTR(PTR_ERR(req)); + req->r_dentry = dget(dentry); + req->r_num_caps = 2; + /* we only need inode linkage */ + req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE); + req->r_locked_dir = dir; + err = ceph_mdsc_do_request(mdsc, NULL, req); + dentry = ceph_finish_lookup(req, dentry, err); + ceph_mdsc_put_request(req); /* will dput(dentry) */ + dout("lookup result=%p\n", dentry); + return dentry; +} + +/* + * If we do a create but get no trace back from the MDS, follow up with + * a lookup (the VFS expects us to link up the provided dentry). + */ +int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry) +{ + struct dentry *result = ceph_lookup(dir, dentry, NULL); + + if (result && !IS_ERR(result)) { + /* + * We created the item, then did a lookup, and found + * it was already linked to another inode we already + * had in our cache (and thus got spliced). Link our + * dentry to that inode, but don't hash it, just in + * case the VFS wants to dereference it. 
+ */ + BUG_ON(!result->d_inode); + d_instantiate(dentry, result->d_inode); + return 0; + } + return PTR_ERR(result); +} + +static int ceph_mknod(struct inode *dir, struct dentry *dentry, + int mode, dev_t rdev) +{ + struct ceph_client *client = ceph_sb_to_client(dir->i_sb); + struct ceph_mds_client *mdsc = &client->mdsc; + struct ceph_mds_request *req; + int err; + + if (ceph_snap(dir) != CEPH_NOSNAP) + return -EROFS; + + dout("mknod in dir %p dentry %p mode 0%o rdev %d\n", + dir, dentry, mode, rdev); + req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS); + if (IS_ERR(req)) { + d_drop(dentry); + return PTR_ERR(req); + } + req->r_dentry = dget(dentry); + req->r_num_caps = 2; + req->r_locked_dir = dir; + req->r_args.mknod.mode = cpu_to_le32(mode); + req->r_args.mknod.rdev = cpu_to_le32(rdev); + req->r_dentry_drop = CEPH_CAP_FILE_SHARED; + req->r_dentry_unless = CEPH_CAP_FILE_EXCL; + err = ceph_mdsc_do_request(mdsc, dir, req); + if (!err && !req->r_reply_info.head->is_dentry) + err = ceph_handle_notrace_create(dir, dentry); + ceph_mdsc_put_request(req); + if (err) + d_drop(dentry); + return err; +} + +static int ceph_create(struct inode *dir, struct dentry *dentry, int mode, + struct nameidata *nd) +{ + dout("create in dir %p dentry %p name '%.*s'\n", + dir, dentry, dentry->d_name.len, dentry->d_name.name); + + if (ceph_snap(dir) != CEPH_NOSNAP) + return -EROFS; + + if (nd) { + BUG_ON((nd->flags & LOOKUP_OPEN) == 0); + dentry = ceph_lookup_open(dir, dentry, nd, mode, 0); + /* hrm, what should i do here if we get aliased? */ + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + return 0; + } + + /* fall back to mknod */ + return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0); +} + +static int ceph_symlink(struct inode *dir, struct dentry *dentry, + const char *dest) +{ + struct ceph_client *client = ceph_sb_to_client(dir->i_sb); + struct ceph_mds_client *mdsc = &client->mdsc; + struct ceph_mds_request *req; + int err; + + if (ceph_snap(dir) != CEPH_NOSNAP) + return -EROFS; + + dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest); + req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS); + if (IS_ERR(req)) { + d_drop(dentry); + return PTR_ERR(req); + } + req->r_dentry = dget(dentry); + req->r_num_caps = 2; + req->r_path2 = kstrdup(dest, GFP_NOFS); + req->r_locked_dir = dir; + req->r_dentry_drop = CEPH_CAP_FILE_SHARED; + req->r_dentry_unless = CEPH_CAP_FILE_EXCL; + err = ceph_mdsc_do_request(mdsc, dir, req); + if (!err && !req->r_reply_info.head->is_dentry) + err = ceph_handle_notrace_create(dir, dentry); + ceph_mdsc_put_request(req); + if (err) + d_drop(dentry); + return err; +} + +static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode) +{ + struct ceph_client *client = ceph_sb_to_client(dir->i_sb); + struct ceph_mds_client *mdsc = &client->mdsc; + struct ceph_mds_request *req; + int err = -EROFS; + int op; + + if (ceph_snap(dir) == CEPH_SNAPDIR) { + /* mkdir .snap/foo is a MKSNAP */ + op = CEPH_MDS_OP_MKSNAP; + dout("mksnap dir %p snap '%.*s' dn %p\n", dir, + dentry->d_name.len, dentry->d_name.name, dentry); + } else if (ceph_snap(dir) == CEPH_NOSNAP) { + dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode); + op = CEPH_MDS_OP_MKDIR; + } else { + goto out; + } + req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out; + } + + req->r_dentry = dget(dentry); + req->r_num_caps = 2; + req->r_locked_dir = dir; + req->r_args.mkdir.mode = cpu_to_le32(mode); + 
req->r_dentry_drop = CEPH_CAP_FILE_SHARED; + req->r_dentry_unless = CEPH_CAP_FILE_EXCL; + err = ceph_mdsc_do_request(mdsc, dir, req); + if (!err && !req->r_reply_info.head->is_dentry) + err = ceph_handle_notrace_create(dir, dentry); + ceph_mdsc_put_request(req); +out: + if (err < 0) + d_drop(dentry); + return err; +} + +static int ceph_link(struct dentry *old_dentry, struct inode *dir, + struct dentry *dentry) +{ + struct ceph_client *client = ceph_sb_to_client(dir->i_sb); + struct ceph_mds_client *mdsc = &client->mdsc; + struct ceph_mds_request *req; + int err; + + if (ceph_snap(dir) != CEPH_NOSNAP) + return -EROFS; + + dout("link in dir %p old_dentry %p dentry %p\n", dir, + old_dentry, dentry); + req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS); + if (IS_ERR(req)) { + d_drop(dentry); + return PTR_ERR(req); + } + req->r_dentry = dget(dentry); + req->r_num_caps = 2; + req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */ + req->r_locked_dir = dir; + req->r_dentry_drop = CEPH_CAP_FILE_SHARED; + req->r_dentry_unless = CEPH_CAP_FILE_EXCL; + err = ceph_mdsc_do_request(mdsc, dir, req); + if (err) + d_drop(dentry); + else if (!req->r_reply_info.head->is_dentry) + d_instantiate(dentry, igrab(old_dentry->d_inode)); + ceph_mdsc_put_request(req); + return err; +} + +/* + * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it + * looks like the link count will hit 0, drop any other caps (other + * than PIN) we don't specifically want (due to the file still being + * open). + */ +static int drop_caps_for_unlink(struct inode *inode) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL; + + spin_lock(&inode->i_lock); + if (inode->i_nlink == 1) { + drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN); + ci->i_ceph_flags |= CEPH_I_NODELAY; + } + spin_unlock(&inode->i_lock); + return drop; +} + +/* + * rmdir and unlink are differ only by the metadata op code + */ +static int ceph_unlink(struct inode *dir, struct dentry *dentry) +{ + struct ceph_client *client = ceph_sb_to_client(dir->i_sb); + struct ceph_mds_client *mdsc = &client->mdsc; + struct inode *inode = dentry->d_inode; + struct ceph_mds_request *req; + int err = -EROFS; + int op; + + if (ceph_snap(dir) == CEPH_SNAPDIR) { + /* rmdir .snap/foo is RMSNAP */ + dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len, + dentry->d_name.name, dentry); + op = CEPH_MDS_OP_RMSNAP; + } else if (ceph_snap(dir) == CEPH_NOSNAP) { + dout("unlink/rmdir dir %p dn %p inode %p\n", + dir, dentry, inode); + op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ? 
+ CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK; + } else + goto out; + req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out; + } + req->r_dentry = dget(dentry); + req->r_num_caps = 2; + req->r_locked_dir = dir; + req->r_dentry_drop = CEPH_CAP_FILE_SHARED; + req->r_dentry_unless = CEPH_CAP_FILE_EXCL; + req->r_inode_drop = drop_caps_for_unlink(inode); + err = ceph_mdsc_do_request(mdsc, dir, req); + if (!err && !req->r_reply_info.head->is_dentry) + d_delete(dentry); + ceph_mdsc_put_request(req); +out: + return err; +} + +static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) +{ + struct ceph_client *client = ceph_sb_to_client(old_dir->i_sb); + struct ceph_mds_client *mdsc = &client->mdsc; + struct ceph_mds_request *req; + int err; + + if (ceph_snap(old_dir) != ceph_snap(new_dir)) + return -EXDEV; + if (ceph_snap(old_dir) != CEPH_NOSNAP || + ceph_snap(new_dir) != CEPH_NOSNAP) + return -EROFS; + dout("rename dir %p dentry %p to dir %p dentry %p\n", + old_dir, old_dentry, new_dir, new_dentry); + req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS); + if (IS_ERR(req)) + return PTR_ERR(req); + req->r_dentry = dget(new_dentry); + req->r_num_caps = 2; + req->r_old_dentry = dget(old_dentry); + req->r_locked_dir = new_dir; + req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED; + req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL; + req->r_dentry_drop = CEPH_CAP_FILE_SHARED; + req->r_dentry_unless = CEPH_CAP_FILE_EXCL; + /* release LINK_RDCACHE on source inode (mds will lock it) */ + req->r_old_inode_drop = CEPH_CAP_LINK_SHARED; + if (new_dentry->d_inode) + req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode); + err = ceph_mdsc_do_request(mdsc, old_dir, req); + if (!err && !req->r_reply_info.head->is_dentry) { + /* + * Normally d_move() is done by fill_trace (called by + * do_request, above). If there is no trace, we need + * to do it here. + */ + d_move(old_dentry, new_dentry); + } + ceph_mdsc_put_request(req); + return err; +} + + +/* + * Check if dentry lease is valid. If not, delete the lease. Try to + * renew if the least is more than half up. + */ +static int dentry_lease_is_valid(struct dentry *dentry) +{ + struct ceph_dentry_info *di; + struct ceph_mds_session *s; + int valid = 0; + u32 gen; + unsigned long ttl; + struct ceph_mds_session *session = NULL; + struct inode *dir = NULL; + u32 seq = 0; + + spin_lock(&dentry->d_lock); + di = ceph_dentry(dentry); + if (di && di->lease_session) { + s = di->lease_session; + spin_lock(&s->s_cap_lock); + gen = s->s_cap_gen; + ttl = s->s_cap_ttl; + spin_unlock(&s->s_cap_lock); + + if (di->lease_gen == gen && + time_before(jiffies, dentry->d_time) && + time_before(jiffies, ttl)) { + valid = 1; + if (di->lease_renew_after && + time_after(jiffies, di->lease_renew_after)) { + /* we should renew */ + dir = dentry->d_parent->d_inode; + session = ceph_get_mds_session(s); + seq = di->lease_seq; + di->lease_renew_after = 0; + di->lease_renew_from = jiffies; + } + } else { + __ceph_mdsc_drop_dentry_lease(dentry); + } + } + spin_unlock(&dentry->d_lock); + + if (session) { + ceph_mdsc_lease_send_msg(session, dir, dentry, + CEPH_MDS_LEASE_RENEW, seq); + ceph_put_mds_session(session); + } + dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid); + return valid; +} + +/* + * Check if directory-wide content lease/cap is valid. 
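+ *
+ * The directory-wide lease is really the CEPH_CAP_FILE_SHARED cap on
+ * the directory itself; a dentry is covered by it only while its
+ * lease_shared_gen matches the directory's current i_shared_gen.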
+ */ +static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry) +{ + struct ceph_inode_info *ci = ceph_inode(dir); + struct ceph_dentry_info *di = ceph_dentry(dentry); + int valid = 0; + + spin_lock(&dir->i_lock); + if (ci->i_shared_gen == di->lease_shared_gen) + valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1); + spin_unlock(&dir->i_lock); + dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n", + dir, (unsigned)ci->i_shared_gen, dentry, + (unsigned)di->lease_shared_gen, valid); + return valid; +} + +/* + * Check if cached dentry can be trusted. + */ +static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd) +{ + struct inode *dir = dentry->d_parent->d_inode; + + dout("d_revalidate %p '%.*s' inode %p\n", dentry, + dentry->d_name.len, dentry->d_name.name, dentry->d_inode); + + /* always trust cached snapped dentries, snapdir dentry */ + if (ceph_snap(dir) != CEPH_NOSNAP) { + dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry, + dentry->d_name.len, dentry->d_name.name, dentry->d_inode); + goto out_touch; + } + if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) + goto out_touch; + + if (dentry_lease_is_valid(dentry) || + dir_lease_is_valid(dir, dentry)) + goto out_touch; + + dout("d_revalidate %p invalid\n", dentry); + d_drop(dentry); + return 0; +out_touch: + ceph_dentry_lru_touch(dentry); + return 1; +} + +/* + * When a dentry is released, clear the dir I_COMPLETE if it was part + * of the current dir gen. + */ +static void ceph_dentry_release(struct dentry *dentry) +{ + struct ceph_dentry_info *di = ceph_dentry(dentry); + struct inode *parent_inode = dentry->d_parent->d_inode; + + if (parent_inode) { + struct ceph_inode_info *ci = ceph_inode(parent_inode); + + spin_lock(&parent_inode->i_lock); + if (ci->i_shared_gen == di->lease_shared_gen) { + dout(" clearing %p complete (d_release)\n", + parent_inode); + ci->i_ceph_flags &= ~CEPH_I_COMPLETE; + ci->i_release_count++; + } + spin_unlock(&parent_inode->i_lock); + } + if (di) { + ceph_dentry_lru_del(dentry); + if (di->lease_session) + ceph_put_mds_session(di->lease_session); + kmem_cache_free(ceph_dentry_cachep, di); + dentry->d_fsdata = NULL; + } +} + +static int ceph_snapdir_d_revalidate(struct dentry *dentry, + struct nameidata *nd) +{ + /* + * Eventually, we'll want to revalidate snapped metadata + * too... probably... + */ + return 1; +} + + + +/* + * read() on a dir. This weird interface hack only works if mounted + * with '-o dirstat'. 
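+ *
+ * e.g. a plain 'cat <dir>' on a dirstat mount returns the
+ * entries/files/subdirs/rentries/rfiles/rsubdirs/rbytes/rctime
+ * summary formatted by ceph_read_dir() below.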
+ */ +static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size, + loff_t *ppos) +{ + struct ceph_file_info *cf = file->private_data; + struct inode *inode = file->f_dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + int left; + + if (!ceph_test_opt(ceph_client(inode->i_sb), DIRSTAT)) + return -EISDIR; + + if (!cf->dir_info) { + cf->dir_info = kmalloc(1024, GFP_NOFS); + if (!cf->dir_info) + return -ENOMEM; + cf->dir_info_len = + sprintf(cf->dir_info, + "entries: %20lld\n" + " files: %20lld\n" + " subdirs: %20lld\n" + "rentries: %20lld\n" + " rfiles: %20lld\n" + " rsubdirs: %20lld\n" + "rbytes: %20lld\n" + "rctime: %10ld.%09ld\n", + ci->i_files + ci->i_subdirs, + ci->i_files, + ci->i_subdirs, + ci->i_rfiles + ci->i_rsubdirs, + ci->i_rfiles, + ci->i_rsubdirs, + ci->i_rbytes, + (long)ci->i_rctime.tv_sec, + (long)ci->i_rctime.tv_nsec); + } + + if (*ppos >= cf->dir_info_len) + return 0; + size = min_t(unsigned, size, cf->dir_info_len-*ppos); + left = copy_to_user(buf, cf->dir_info + *ppos, size); + if (left == size) + return -EFAULT; + *ppos += (size - left); + return size - left; +} + +/* + * an fsync() on a dir will wait for any uncommitted directory + * operations to commit. + */ +static int ceph_dir_fsync(struct file *file, struct dentry *dentry, + int datasync) +{ + struct inode *inode = dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + struct list_head *head = &ci->i_unsafe_dirops; + struct ceph_mds_request *req; + u64 last_tid; + int ret = 0; + + dout("dir_fsync %p\n", inode); + spin_lock(&ci->i_unsafe_lock); + if (list_empty(head)) + goto out; + + req = list_entry(head->prev, + struct ceph_mds_request, r_unsafe_dir_item); + last_tid = req->r_tid; + + do { + ceph_mdsc_get_request(req); + spin_unlock(&ci->i_unsafe_lock); + dout("dir_fsync %p wait on tid %llu (until %llu)\n", + inode, req->r_tid, last_tid); + if (req->r_timeout) { + ret = wait_for_completion_timeout( + &req->r_safe_completion, req->r_timeout); + if (ret > 0) + ret = 0; + else if (ret == 0) + ret = -EIO; /* timed out */ + } else { + wait_for_completion(&req->r_safe_completion); + } + spin_lock(&ci->i_unsafe_lock); + ceph_mdsc_put_request(req); + + if (ret || list_empty(head)) + break; + req = list_entry(head->next, + struct ceph_mds_request, r_unsafe_dir_item); + } while (req->r_tid < last_tid); +out: + spin_unlock(&ci->i_unsafe_lock); + return ret; +} + +/* + * We maintain a private dentry LRU. + * + * FIXME: this needs to be changed to a per-mds lru to be useful. 
+ */ +void ceph_dentry_lru_add(struct dentry *dn) +{ + struct ceph_dentry_info *di = ceph_dentry(dn); + struct ceph_mds_client *mdsc; + dout("dentry_lru_add %p %p\t%.*s\n", + di, dn, dn->d_name.len, dn->d_name.name); + + if (di) { + mdsc = &ceph_client(dn->d_sb)->mdsc; + spin_lock(&mdsc->dentry_lru_lock); + list_add_tail(&di->lru, &mdsc->dentry_lru); + mdsc->num_dentry++; + spin_unlock(&mdsc->dentry_lru_lock); + } +} + +void ceph_dentry_lru_touch(struct dentry *dn) +{ + struct ceph_dentry_info *di = ceph_dentry(dn); + struct ceph_mds_client *mdsc; + dout("dentry_lru_touch %p %p\t%.*s\n", + di, dn, dn->d_name.len, dn->d_name.name); + + if (di) { + mdsc = &ceph_client(dn->d_sb)->mdsc; + spin_lock(&mdsc->dentry_lru_lock); + list_move_tail(&di->lru, &mdsc->dentry_lru); + spin_unlock(&mdsc->dentry_lru_lock); + } +} + +void ceph_dentry_lru_del(struct dentry *dn) +{ + struct ceph_dentry_info *di = ceph_dentry(dn); + struct ceph_mds_client *mdsc; + + dout("dentry_lru_del %p %p\t%.*s\n", + di, dn, dn->d_name.len, dn->d_name.name); + if (di) { + mdsc = &ceph_client(dn->d_sb)->mdsc; + spin_lock(&mdsc->dentry_lru_lock); + list_del_init(&di->lru); + mdsc->num_dentry--; + spin_unlock(&mdsc->dentry_lru_lock); + } +} + +const struct file_operations ceph_dir_fops = { + .read = ceph_read_dir, + .readdir = ceph_readdir, + .llseek = ceph_dir_llseek, + .open = ceph_open, + .release = ceph_release, + .unlocked_ioctl = ceph_ioctl, + .fsync = ceph_dir_fsync, +}; + +const struct inode_operations ceph_dir_iops = { + .lookup = ceph_lookup, + .permission = ceph_permission, + .getattr = ceph_getattr, + .setattr = ceph_setattr, + .setxattr = ceph_setxattr, + .getxattr = ceph_getxattr, + .listxattr = ceph_listxattr, + .removexattr = ceph_removexattr, + .mknod = ceph_mknod, + .symlink = ceph_symlink, + .mkdir = ceph_mkdir, + .link = ceph_link, + .unlink = ceph_unlink, + .rmdir = ceph_unlink, + .rename = ceph_rename, + .create = ceph_create, +}; + +struct dentry_operations ceph_dentry_ops = { + .d_revalidate = ceph_d_revalidate, + .d_release = ceph_dentry_release, +}; + +struct dentry_operations ceph_snapdir_dentry_ops = { + .d_revalidate = ceph_snapdir_d_revalidate, +}; + +struct dentry_operations ceph_snap_dentry_ops = { +}; -- cgit v1.2.3 From 124e68e74099090e28da5518f73fda878e7e8232 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:08 -0700 Subject: ceph: file operations File open and close operations, and read and write methods that ensure we have obtained the proper capabilities from the MDS cluster before performing IO on a file. We take references on held capabilities for the duration of the read/write to avoid prematurely releasing them back to the MDS. We implement two main paths for read and write: one that is buffered (and uses generic_aio_{read,write}), and one that is fully synchronous and blocking (operating either on a __user pointer or, if O_DIRECT, directly on user pages). Signed-off-by: Sage Weil --- fs/ceph/file.c | 904 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 904 insertions(+) create mode 100644 fs/ceph/file.c diff --git a/fs/ceph/file.c b/fs/ceph/file.c new file mode 100644 index 000000000000..1bd57c8953bf --- /dev/null +++ b/fs/ceph/file.c @@ -0,0 +1,904 @@ +#include "ceph_debug.h" + +#include +#include +#include +#include + +#include "super.h" +#include "mds_client.h" + +/* + * Ceph file operations + * + * Implement basic open/close functionality, and implement + * read/write. 
+ * + * We implement three modes of file I/O: + * - buffered uses the generic_file_aio_{read,write} helpers + * + * - synchronous is used when there is multi-client read/write + * sharing, avoids the page cache, and synchronously waits for an + * ack from the OSD. + * + * - direct io takes the variant of the sync path that references + * user pages directly. + * + * fsync() flushes and waits on dirty pages, but just queues metadata + * for writeback: since the MDS can recover size and mtime there is no + * need to wait for MDS acknowledgement. + */ + + +/* + * Prepare an open request. Preallocate ceph_cap to avoid an + * inopportune ENOMEM later. + */ +static struct ceph_mds_request * +prepare_open_request(struct super_block *sb, int flags, int create_mode) +{ + struct ceph_client *client = ceph_sb_to_client(sb); + struct ceph_mds_client *mdsc = &client->mdsc; + struct ceph_mds_request *req; + int want_auth = USE_ANY_MDS; + int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN; + + if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC)) + want_auth = USE_AUTH_MDS; + + req = ceph_mdsc_create_request(mdsc, op, want_auth); + if (IS_ERR(req)) + goto out; + req->r_fmode = ceph_flags_to_mode(flags); + req->r_args.open.flags = cpu_to_le32(flags); + req->r_args.open.mode = cpu_to_le32(create_mode); + req->r_args.open.preferred = -1; +out: + return req; +} + +/* + * initialize private struct file data. + * if we fail, clean up by dropping fmode reference on the ceph_inode + */ +static int ceph_init_file(struct inode *inode, struct file *file, int fmode) +{ + struct ceph_file_info *cf; + int ret = 0; + + switch (inode->i_mode & S_IFMT) { + case S_IFREG: + case S_IFDIR: + dout("init_file %p %p 0%o (regular)\n", inode, file, + inode->i_mode); + cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO); + if (cf == NULL) { + ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ + return -ENOMEM; + } + cf->fmode = fmode; + cf->next_offset = 2; + file->private_data = cf; + BUG_ON(inode->i_fop->release != ceph_release); + break; + + case S_IFLNK: + dout("init_file %p %p 0%o (symlink)\n", inode, file, + inode->i_mode); + ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ + break; + + default: + dout("init_file %p %p 0%o (special)\n", inode, file, + inode->i_mode); + /* + * we need to drop the open ref now, since we don't + * have .release set to ceph_release. + */ + ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ + BUG_ON(inode->i_fop->release == ceph_release); + + /* call the proper open fop */ + ret = inode->i_fop->open(inode, file); + } + return ret; +} + +/* + * If the filp already has private_data, that means the file was + * already opened by intent during lookup, and we do nothing. + * + * If we already have the requisite capabilities, we can satisfy + * the open request locally (no need to request new caps from the + * MDS). We do, however, need to inform the MDS (asynchronously) + * if our wanted caps set expands. + */ +int ceph_open(struct inode *inode, struct file *file) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_client *client = ceph_sb_to_client(inode->i_sb); + struct ceph_mds_client *mdsc = &client->mdsc; + struct ceph_mds_request *req; + struct ceph_file_info *cf = file->private_data; + struct inode *parent_inode = file->f_dentry->d_parent->d_inode; + int err; + int flags, fmode, wanted; + + if (cf) { + dout("open file %p is already opened\n", file); + return 0; + } + + /* filter out O_CREAT|O_EXCL; vfs did that already. yuck. 
*/ + flags = file->f_flags & ~(O_CREAT|O_EXCL); + if (S_ISDIR(inode->i_mode)) + flags = O_DIRECTORY; /* mds likes to know */ + + dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode, + ceph_vinop(inode), file, flags, file->f_flags); + fmode = ceph_flags_to_mode(flags); + wanted = ceph_caps_for_mode(fmode); + + /* snapped files are read-only */ + if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE)) + return -EROFS; + + /* trivially open snapdir */ + if (ceph_snap(inode) == CEPH_SNAPDIR) { + spin_lock(&inode->i_lock); + __ceph_get_fmode(ci, fmode); + spin_unlock(&inode->i_lock); + return ceph_init_file(inode, file, fmode); + } + + /* + * No need to block if we have any caps. Update wanted set + * asynchronously. + */ + spin_lock(&inode->i_lock); + if (__ceph_is_any_real_caps(ci)) { + int mds_wanted = __ceph_caps_mds_wanted(ci); + int issued = __ceph_caps_issued(ci, NULL); + + dout("open %p fmode %d want %s issued %s using existing\n", + inode, fmode, ceph_cap_string(wanted), + ceph_cap_string(issued)); + __ceph_get_fmode(ci, fmode); + spin_unlock(&inode->i_lock); + + /* adjust wanted? */ + if ((issued & wanted) != wanted && + (mds_wanted & wanted) != wanted && + ceph_snap(inode) != CEPH_SNAPDIR) + ceph_check_caps(ci, 0, NULL); + + return ceph_init_file(inode, file, fmode); + } else if (ceph_snap(inode) != CEPH_NOSNAP && + (ci->i_snap_caps & wanted) == wanted) { + __ceph_get_fmode(ci, fmode); + spin_unlock(&inode->i_lock); + return ceph_init_file(inode, file, fmode); + } + spin_unlock(&inode->i_lock); + + dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted)); + req = prepare_open_request(inode->i_sb, flags, 0); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out; + } + req->r_inode = igrab(inode); + req->r_num_caps = 1; + err = ceph_mdsc_do_request(mdsc, parent_inode, req); + if (!err) + err = ceph_init_file(inode, file, req->r_fmode); + ceph_mdsc_put_request(req); + dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode)); +out: + return err; +} + + +/* + * Do a lookup + open with a single request. + * + * If this succeeds, but some subsequent check in the vfs + * may_open() fails, the struct *file gets cleaned up (i.e. + * ceph_release gets called). So fear not! + */ +/* + * flags + * path_lookup_open -> LOOKUP_OPEN + * path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE + */ +struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry, + struct nameidata *nd, int mode, + int locked_dir) +{ + struct ceph_client *client = ceph_sb_to_client(dir->i_sb); + struct ceph_mds_client *mdsc = &client->mdsc; + struct file *file = nd->intent.open.file; + struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry); + struct ceph_mds_request *req; + int err; + int flags = nd->intent.open.flags - 1; /* silly vfs! 
*/ + + dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n", + dentry, dentry->d_name.len, dentry->d_name.name, flags, mode); + + /* do the open */ + req = prepare_open_request(dir->i_sb, flags, mode); + if (IS_ERR(req)) + return ERR_PTR(PTR_ERR(req)); + req->r_dentry = dget(dentry); + req->r_num_caps = 2; + if (flags & O_CREAT) { + req->r_dentry_drop = CEPH_CAP_FILE_SHARED; + req->r_dentry_unless = CEPH_CAP_FILE_EXCL; + } + req->r_locked_dir = dir; /* caller holds dir->i_mutex */ + err = ceph_mdsc_do_request(mdsc, parent_inode, req); + dentry = ceph_finish_lookup(req, dentry, err); + if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry) + err = ceph_handle_notrace_create(dir, dentry); + if (!err) + err = ceph_init_file(req->r_dentry->d_inode, file, + req->r_fmode); + ceph_mdsc_put_request(req); + dout("ceph_lookup_open result=%p\n", dentry); + return dentry; +} + +int ceph_release(struct inode *inode, struct file *file) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_file_info *cf = file->private_data; + + dout("release inode %p file %p\n", inode, file); + ceph_put_fmode(ci, cf->fmode); + if (cf->last_readdir) + ceph_mdsc_put_request(cf->last_readdir); + kfree(cf->last_name); + kfree(cf->dir_info); + dput(cf->dentry); + kmem_cache_free(ceph_file_cachep, cf); + return 0; +} + +/* + * build a vector of user pages + */ +static struct page **get_direct_page_vector(const char __user *data, + int num_pages, + loff_t off, size_t len) +{ + struct page **pages; + int rc; + + pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); + if (!pages) + return ERR_PTR(-ENOMEM); + + down_read(¤t->mm->mmap_sem); + rc = get_user_pages(current, current->mm, (unsigned long)data, + num_pages, 0, 0, pages, NULL); + up_read(¤t->mm->mmap_sem); + if (rc < 0) + goto fail; + return pages; + +fail: + kfree(pages); + return ERR_PTR(rc); +} + +static void put_page_vector(struct page **pages, int num_pages) +{ + int i; + + for (i = 0; i < num_pages; i++) + put_page(pages[i]); + kfree(pages); +} + +void ceph_release_page_vector(struct page **pages, int num_pages) +{ + int i; + + for (i = 0; i < num_pages; i++) + __free_pages(pages[i], 0); + kfree(pages); +} + +/* + * allocate a vector new pages + */ +static struct page **alloc_page_vector(int num_pages) +{ + struct page **pages; + int i; + + pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); + if (!pages) + return ERR_PTR(-ENOMEM); + for (i = 0; i < num_pages; i++) { + pages[i] = alloc_page(GFP_NOFS); + if (pages[i] == NULL) { + ceph_release_page_vector(pages, i); + return ERR_PTR(-ENOMEM); + } + } + return pages; +} + +/* + * copy user data into a page vector + */ +static int copy_user_to_page_vector(struct page **pages, + const char __user *data, + loff_t off, size_t len) +{ + int i = 0; + int po = off & ~PAGE_CACHE_MASK; + int left = len; + int l, bad; + + while (left > 0) { + l = min_t(int, PAGE_CACHE_SIZE-po, left); + bad = copy_from_user(page_address(pages[i]) + po, data, l); + if (bad == l) + return -EFAULT; + data += l - bad; + left -= l - bad; + if (po) { + po += l - bad; + if (po == PAGE_CACHE_SIZE) + po = 0; + } + } + return len; +} + +/* + * copy user data from a page vector into a user pointer + */ +static int copy_page_vector_to_user(struct page **pages, char __user *data, + loff_t off, size_t len) +{ + int i = 0; + int po = off & ~PAGE_CACHE_MASK; + int left = len; + int l, bad; + + while (left > 0) { + l = min_t(int, left, PAGE_CACHE_SIZE-po); + bad = copy_to_user(data, page_address(pages[i]) + po, l); + if (bad 
== l) + return -EFAULT; + data += l - bad; + left -= l - bad; + if (po) { + po += l - bad; + if (po == PAGE_CACHE_SIZE) + po = 0; + } + i++; + } + return len; +} + +/* + * Zero an extent within a page vector. Offset is relative to the + * start of the first page. + */ +static void zero_page_vector_range(int off, int len, struct page **pages) +{ + int i = off >> PAGE_CACHE_SHIFT; + + dout("zero_page_vector_page %u~%u\n", off, len); + BUG_ON(len < PAGE_CACHE_SIZE); + + /* leading partial page? */ + if (off & ~PAGE_CACHE_MASK) { + dout("zeroing %d %p head from %d\n", i, pages[i], + (int)(off & ~PAGE_CACHE_MASK)); + zero_user_segment(pages[i], off & ~PAGE_CACHE_MASK, + PAGE_CACHE_SIZE); + off += PAGE_CACHE_SIZE; + off &= PAGE_CACHE_MASK; + i++; + } + while (len >= PAGE_CACHE_SIZE) { + dout("zeroing %d %p\n", i, pages[i]); + zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE); + off += PAGE_CACHE_SIZE; + len -= PAGE_CACHE_SIZE; + i++; + } + /* trailing partial page? */ + if (len) { + dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len); + zero_user_segment(pages[i], 0, len); + } +} + + +/* + * Read a range of bytes striped over one or more objects. Iterate over + * objects we stripe over. (That's not atomic, but good enough for now.) + * + * If we get a short result from the OSD, check against i_size; we need to + * only return a short read to the caller if we hit EOF. + */ +static int striped_read(struct inode *inode, + u64 off, u64 len, + struct page **pages, int num_pages) +{ + struct ceph_client *client = ceph_inode_to_client(inode); + struct ceph_inode_info *ci = ceph_inode(inode); + u64 pos, this_len; + int page_off = off & ~PAGE_CACHE_SIZE; /* first byte's offset in page */ + int left, pages_left; + int read; + struct page **page_pos; + int ret; + bool hit_stripe, was_short; + + /* + * we may need to do multiple reads. not atomic, unfortunately. + */ + pos = off; + left = len; + page_pos = pages; + pages_left = num_pages; + read = 0; + +more: + this_len = left; + ret = ceph_osdc_readpages(&client->osdc, ceph_vino(inode), + &ci->i_layout, pos, &this_len, + ci->i_truncate_seq, + ci->i_truncate_size, + page_pos, pages_left); + hit_stripe = this_len < left; + was_short = ret >= 0 && ret < this_len; + if (ret == -ENOENT) + ret = 0; + dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read, + ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : ""); + + if (ret > 0) { + int didpages = + ((pos & ~PAGE_CACHE_MASK) + ret) >> PAGE_CACHE_SHIFT; + + if (read < pos - off) { + dout(" zero gap %llu to %llu\n", off + read, pos); + zero_page_vector_range(page_off + read, + pos - off - read, pages); + } + pos += ret; + read = pos - off; + left -= ret; + page_pos += didpages; + pages_left -= didpages; + + /* hit stripe? */ + if (left && hit_stripe) + goto more; + } + + if (was_short) { + /* was original extent fully inside i_size? */ + if (pos + left <= inode->i_size) { + dout("zero tail\n"); + zero_page_vector_range(page_off + read, len - read, + pages); + goto out; + } + + /* check i_size */ + ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE); + if (ret < 0) + goto out; + + /* hit EOF? */ + if (pos >= inode->i_size) + goto out; + + goto more; + } + +out: + if (ret >= 0) + ret = read; + dout("striped_read returns %d\n", ret); + return ret; +} + +/* + * Completely synchronous read and write methods. Direct from __user + * buffer to osd, or directly to user pages (if O_DIRECT). + * + * If the read spans object boundary, just do multiple reads. 
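+ *
+ * For O_DIRECT the user's pages are pinned with get_user_pages() and
+ * handed straight to the OSD client; otherwise data lands in freshly
+ * allocated pages and is copied out with copy_to_user(). The O_DIRECT
+ * read path also writes back and waits on the page cache first so
+ * mixed buffered and direct i/o behaves sensibly.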
+ */ +static ssize_t ceph_sync_read(struct file *file, char __user *data, + unsigned len, loff_t *poff) +{ + struct inode *inode = file->f_dentry->d_inode; + struct page **pages; + u64 off = *poff; + int num_pages = calc_pages_for(off, len); + int ret; + + dout("sync_read on file %p %llu~%u %s\n", file, off, len, + (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); + + if (file->f_flags & O_DIRECT) { + pages = get_direct_page_vector(data, num_pages, off, len); + + /* + * flush any page cache pages in this range. this + * will make concurrent normal and O_DIRECT io slow, + * but it will at least behave sensibly when they are + * in sequence. + */ + filemap_write_and_wait(inode->i_mapping); + } else { + pages = alloc_page_vector(num_pages); + } + if (IS_ERR(pages)) + return PTR_ERR(pages); + + ret = striped_read(inode, off, len, pages, num_pages); + + if (ret >= 0 && (file->f_flags & O_DIRECT) == 0) + ret = copy_page_vector_to_user(pages, data, off, ret); + if (ret >= 0) + *poff = off + ret; + + if (file->f_flags & O_DIRECT) + put_page_vector(pages, num_pages); + else + ceph_release_page_vector(pages, num_pages); + dout("sync_read result %d\n", ret); + return ret; +} + +/* + * Write commit callback, called if we requested both an ACK and + * ONDISK commit reply from the OSD. + */ +static void sync_write_commit(struct ceph_osd_request *req, + struct ceph_msg *msg) +{ + struct ceph_inode_info *ci = ceph_inode(req->r_inode); + + dout("sync_write_commit %p tid %llu\n", req, req->r_tid); + spin_lock(&ci->i_unsafe_lock); + list_del_init(&req->r_unsafe_item); + spin_unlock(&ci->i_unsafe_lock); + ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR); +} + +/* + * Synchronous write, straight from __user pointer or user pages (if + * O_DIRECT). + * + * If write spans object boundary, just do multiple writes. (For a + * correct atomic write, we should e.g. take write locks on all + * objects, rollback on failure, etc.) + */ +static ssize_t ceph_sync_write(struct file *file, const char __user *data, + size_t left, loff_t *offset) +{ + struct inode *inode = file->f_dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_client *client = ceph_inode_to_client(inode); + struct ceph_osd_request *req; + struct page **pages; + int num_pages; + long long unsigned pos; + u64 len; + int written = 0; + int flags; + int do_sync = 0; + int check_caps = 0; + int ret; + struct timespec mtime = CURRENT_TIME; + + if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP) + return -EROFS; + + dout("sync_write on file %p %lld~%u %s\n", file, *offset, + (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); + + if (file->f_flags & O_APPEND) + pos = i_size_read(inode); + else + pos = *offset; + + flags = CEPH_OSD_FLAG_ORDERSNAP | + CEPH_OSD_FLAG_ONDISK | + CEPH_OSD_FLAG_WRITE; + if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0) + flags |= CEPH_OSD_FLAG_ACK; + else + do_sync = 1; + + /* + * we may need to do multiple writes here if we span an object + * boundary. this isn't atomic, unfortunately. 
:( + */ +more: + len = left; + req = ceph_osdc_new_request(&client->osdc, &ci->i_layout, + ceph_vino(inode), pos, &len, + CEPH_OSD_OP_WRITE, flags, + ci->i_snap_realm->cached_context, + do_sync, + ci->i_truncate_seq, ci->i_truncate_size, + &mtime, false, 2); + if (IS_ERR(req)) + return PTR_ERR(req); + + num_pages = calc_pages_for(pos, len); + + if (file->f_flags & O_DIRECT) { + pages = get_direct_page_vector(data, num_pages, pos, len); + if (IS_ERR(pages)) { + ret = PTR_ERR(pages); + goto out; + } + + /* + * throw out any page cache pages in this range. this + * may block. + */ + truncate_inode_pages_range(inode->i_mapping, pos, pos+len); + } else { + pages = alloc_page_vector(num_pages); + if (IS_ERR(pages)) { + ret = PTR_ERR(pages); + goto out; + } + ret = copy_user_to_page_vector(pages, data, pos, len); + if (ret < 0) { + ceph_release_page_vector(pages, num_pages); + goto out; + } + + if ((file->f_flags & O_SYNC) == 0) { + /* get a second commit callback */ + req->r_safe_callback = sync_write_commit; + req->r_own_pages = 1; + } + } + req->r_pages = pages; + req->r_num_pages = num_pages; + req->r_inode = inode; + + ret = ceph_osdc_start_request(&client->osdc, req, false); + if (!ret) { + if (req->r_safe_callback) { + /* + * Add to inode unsafe list only after we + * start_request so that a tid has been assigned. + */ + spin_lock(&ci->i_unsafe_lock); + list_add(&ci->i_unsafe_writes, &req->r_unsafe_item); + spin_unlock(&ci->i_unsafe_lock); + ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR); + } + ret = ceph_osdc_wait_request(&client->osdc, req); + } + + if (file->f_flags & O_DIRECT) + put_page_vector(pages, num_pages); + else if (file->f_flags & O_SYNC) + ceph_release_page_vector(pages, num_pages); + +out: + ceph_osdc_put_request(req); + if (ret == 0) { + pos += len; + written += len; + left -= len; + if (left) + goto more; + + ret = written; + *offset = pos; + if (pos > i_size_read(inode)) + check_caps = ceph_inode_set_size(inode, pos); + if (check_caps) + ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, + NULL); + } + return ret; +} + +/* + * Wrap generic_file_aio_read with checks for cap bits on the inode. + * Atomically grab references, so that those bits are not released + * back to the MDS mid-read. + * + * Hmm, the sync read case isn't actually async... should it be? + */ +static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) +{ + struct file *filp = iocb->ki_filp; + loff_t *ppos = &iocb->ki_pos; + size_t len = iov->iov_len; + struct inode *inode = filp->f_dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + ssize_t ret; + int got = 0; + + dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", + inode, ceph_vinop(inode), pos, (unsigned)len, inode); + __ceph_do_pending_vmtruncate(inode); + ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE, + &got, -1); + if (ret < 0) + goto out; + dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n", + inode, ceph_vinop(inode), pos, (unsigned)len, + ceph_cap_string(got)); + + if ((got & CEPH_CAP_FILE_CACHE) == 0 || + (iocb->ki_filp->f_flags & O_DIRECT) || + (inode->i_sb->s_flags & MS_SYNCHRONOUS)) + /* hmm, this isn't really async... 
*/ + ret = ceph_sync_read(filp, iov->iov_base, len, ppos); + else + ret = generic_file_aio_read(iocb, iov, nr_segs, pos); + +out: + dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", + inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); + ceph_put_cap_refs(ci, got); + return ret; +} + +/* + * Take cap references to avoid releasing caps to MDS mid-write. + * + * If we are synchronous, and write with an old snap context, the OSD + * may return EOLDSNAPC. In that case, retry the write.. _after_ + * dropping our cap refs and allowing the pending snap to logically + * complete _before_ this write occurs. + * + * If we are near ENOSPC, write synchronously. + */ +static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) +{ + struct file *file = iocb->ki_filp; + struct inode *inode = file->f_dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_osd_client *osdc = &ceph_client(inode->i_sb)->osdc; + loff_t endoff = pos + iov->iov_len; + int got = 0; + int ret; + + if (ceph_snap(inode) != CEPH_NOSNAP) + return -EROFS; + +retry_snap: + if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) + return -ENOSPC; + __ceph_do_pending_vmtruncate(inode); + dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n", + inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len, + inode->i_size); + ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER, + &got, endoff); + if (ret < 0) + goto out; + + dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n", + inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len, + ceph_cap_string(got)); + + if ((got & CEPH_CAP_FILE_BUFFER) == 0 || + (iocb->ki_filp->f_flags & O_DIRECT) || + (inode->i_sb->s_flags & MS_SYNCHRONOUS)) { + ret = ceph_sync_write(file, iov->iov_base, iov->iov_len, + &iocb->ki_pos); + } else { + ret = generic_file_aio_write(iocb, iov, nr_segs, pos); + + if ((ret >= 0 || ret == -EIOCBQUEUED) && + ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) + || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) + ret = vfs_fsync_range(file, file->f_path.dentry, + pos, pos + ret - 1, 1); + } + if (ret >= 0) { + spin_lock(&inode->i_lock); + __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); + spin_unlock(&inode->i_lock); + } + +out: + dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n", + inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len, + ceph_cap_string(got)); + ceph_put_cap_refs(ci, got); + + if (ret == -EOLDSNAPC) { + dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n", + inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len); + goto retry_snap; + } + + return ret; +} + +/* + * llseek. be sure to verify file size on SEEK_END. + */ +static loff_t ceph_llseek(struct file *file, loff_t offset, int origin) +{ + struct inode *inode = file->f_mapping->host; + int ret; + + mutex_lock(&inode->i_mutex); + __ceph_do_pending_vmtruncate(inode); + switch (origin) { + case SEEK_END: + ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE); + if (ret < 0) { + offset = ret; + goto out; + } + offset += inode->i_size; + break; + case SEEK_CUR: + /* + * Here we special-case the lseek(fd, 0, SEEK_CUR) + * position-querying operation. 
Avoid rewriting the "same" + * f_pos value back to the file because a concurrent read(), + * write() or lseek() might have altered it + */ + if (offset == 0) { + offset = file->f_pos; + goto out; + } + offset += file->f_pos; + break; + } + + if (offset < 0 || offset > inode->i_sb->s_maxbytes) { + offset = -EINVAL; + goto out; + } + + /* Special lock needed here? */ + if (offset != file->f_pos) { + file->f_pos = offset; + file->f_version = 0; + } + +out: + mutex_unlock(&inode->i_mutex); + return offset; +} + +const struct file_operations ceph_file_fops = { + .open = ceph_open, + .release = ceph_release, + .llseek = ceph_llseek, + .read = do_sync_read, + .write = do_sync_write, + .aio_read = ceph_aio_read, + .aio_write = ceph_aio_write, + .mmap = ceph_mmap, + .fsync = ceph_fsync, + .splice_read = generic_file_splice_read, + .splice_write = generic_file_splice_write, + .unlocked_ioctl = ceph_ioctl, + .compat_ioctl = ceph_ioctl, +}; + -- cgit v1.2.3 From 1d3576fd10f0d7a104204267b81cf84a07028dad Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:09 -0700 Subject: ceph: address space operations The ceph address space methods are concerned primarily with managing the dirty page accounting in the inode, which (among other things) must keep track of which snapshot context each page was dirtied in, and ensure that dirty data is written out to the OSDs in snapshort order. A writepage() on a page that is not currently writeable due to snapshot writeback ordering constraints is ignored (it was presumably called from kswapd). Signed-off-by: Sage Weil --- fs/ceph/addr.c | 1115 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1115 insertions(+) create mode 100644 fs/ceph/addr.c diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c new file mode 100644 index 000000000000..c7d673ffe023 --- /dev/null +++ b/fs/ceph/addr.c @@ -0,0 +1,1115 @@ +#include "ceph_debug.h" + +#include +#include +#include +#include +#include /* generic_writepages */ +#include +#include + +#include "super.h" +#include "osd_client.h" + +/* + * Ceph address space ops. + * + * There are a few funny things going on here. + * + * The page->private field is used to reference a struct + * ceph_snap_context for _every_ dirty page. This indicates which + * snapshot the page was logically dirtied in, and thus which snap + * context needs to be associated with the osd write during writeback. + * + * Similarly, struct ceph_inode_info maintains a set of counters to + * count dirty pages on the inode. In the absense of snapshots, + * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count. + * + * When a snapshot is taken (that is, when the client receives + * notification that a snapshot was taken), each inode with caps and + * with dirty pages (dirty pages implies there is a cap) gets a new + * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending + * order, new snaps go to the tail). The i_wrbuffer_ref_head count is + * moved to capsnap->dirty. (Unless a sync write is currently in + * progress. In that case, the capsnap is said to be "pending", new + * writes cannot start, and the capsnap isn't "finalized" until the + * write completes (or fails) and a final size/mtime for the inode for + * that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0. + * + * On writeback, we must submit writes to the osd IN SNAP ORDER. So, + * we look for the first capsnap in i_cap_snaps and write out pages in + * that snap context _only_. 
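Before moving on, a minimal sketch of the accounting just described, as a hypothetical debug check (the helper name is invented; the fields are the ones this comment and the writeback code later in this patch use): pages dirtied against the live "head" context plus the dirty pages pinned by each queued cap_snap should add up to the inode's total dirty-page count.

    /* Illustration only; assumes the caller holds i_lock. */
    static void check_wrbuffer_accounting(struct ceph_inode_info *ci)
    {
            struct ceph_cap_snap *capsnap;
            int total = ci->i_wrbuffer_ref_head;     /* dirtied in the head context */

            list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item)
                    total += capsnap->dirty_pages;   /* dirtied before that snap */

            WARN_ON(total != ci->i_wrbuffer_ref);    /* all dirty pages on this inode */
    }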
Then we move on to the next capsnap, + * eventually reaching the "live" or "head" context (i.e., pages that + * are not yet snapped) and are writing the most recently dirtied + * pages. + * + * Invalidate and so forth must take care to ensure the dirty page + * accounting is preserved. + */ + + +/* + * Dirty a page. Optimistically adjust accounting, on the assumption + * that we won't race with invalidate. If we do, readjust. + */ +static int ceph_set_page_dirty(struct page *page) +{ + struct address_space *mapping = page->mapping; + struct inode *inode; + struct ceph_inode_info *ci; + int undo = 0; + struct ceph_snap_context *snapc; + + if (unlikely(!mapping)) + return !TestSetPageDirty(page); + + if (TestSetPageDirty(page)) { + dout("%p set_page_dirty %p idx %lu -- already dirty\n", + mapping->host, page, page->index); + return 0; + } + + inode = mapping->host; + ci = ceph_inode(inode); + + /* + * Note that we're grabbing a snapc ref here without holding + * any locks! + */ + snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context); + + /* dirty the head */ + spin_lock(&inode->i_lock); + if (ci->i_wrbuffer_ref_head == 0) + ci->i_head_snapc = ceph_get_snap_context(snapc); + ++ci->i_wrbuffer_ref_head; + if (ci->i_wrbuffer_ref == 0) + igrab(inode); + ++ci->i_wrbuffer_ref; + dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d " + "snapc %p seq %lld (%d snaps)\n", + mapping->host, page, page->index, + ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1, + ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, + snapc, snapc->seq, snapc->num_snaps); + spin_unlock(&inode->i_lock); + + /* now adjust page */ + spin_lock_irq(&mapping->tree_lock); + if (page->mapping) { /* Race with truncate? */ + WARN_ON_ONCE(!PageUptodate(page)); + + if (mapping_cap_account_dirty(mapping)) { + __inc_zone_page_state(page, NR_FILE_DIRTY); + __inc_bdi_stat(mapping->backing_dev_info, + BDI_RECLAIMABLE); + task_io_account_write(PAGE_CACHE_SIZE); + } + radix_tree_tag_set(&mapping->page_tree, + page_index(page), PAGECACHE_TAG_DIRTY); + + /* + * Reference snap context in page->private. Also set + * PagePrivate so that we get invalidatepage callback. + */ + page->private = (unsigned long)snapc; + SetPagePrivate(page); + } else { + dout("ANON set_page_dirty %p (raced truncate?)\n", page); + undo = 1; + } + + spin_unlock_irq(&mapping->tree_lock); + + if (undo) + /* whoops, we failed to dirty the page */ + ceph_put_wrbuffer_cap_refs(ci, 1, snapc); + + __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); + + BUG_ON(!PageDirty(page)); + return 1; +} + +/* + * If we are truncating the full page (i.e. offset == 0), adjust the + * dirty page counters appropriately. Only called if there is private + * data on the page. + */ +static void ceph_invalidatepage(struct page *page, unsigned long offset) +{ + struct inode *inode = page->mapping->host; + struct ceph_inode_info *ci; + struct ceph_snap_context *snapc = (void *)page->private; + + BUG_ON(!PageLocked(page)); + BUG_ON(!page->private); + BUG_ON(!PagePrivate(page)); + BUG_ON(!page->mapping); + + /* + * We can get non-dirty pages here due to races between + * set_page_dirty and truncate_complete_page; just spit out a + * warning, in case we end up with accounting problems later. 
+ */ + if (!PageDirty(page)) + pr_err("%p invalidatepage %p page not dirty\n", inode, page); + + if (offset == 0) + ClearPageChecked(page); + + ci = ceph_inode(inode); + if (offset == 0) { + dout("%p invalidatepage %p idx %lu full dirty page %lu\n", + inode, page, page->index, offset); + ceph_put_wrbuffer_cap_refs(ci, 1, snapc); + ceph_put_snap_context(snapc); + page->private = 0; + ClearPagePrivate(page); + } else { + dout("%p invalidatepage %p idx %lu partial dirty page\n", + inode, page, page->index); + } +} + +/* just a sanity check */ +static int ceph_releasepage(struct page *page, gfp_t g) +{ + struct inode *inode = page->mapping ? page->mapping->host : NULL; + dout("%p releasepage %p idx %lu\n", inode, page, page->index); + WARN_ON(PageDirty(page)); + WARN_ON(page->private); + WARN_ON(PagePrivate(page)); + return 0; +} + +/* + * read a single page, without unlocking it. + */ +static int readpage_nounlock(struct file *filp, struct page *page) +{ + struct inode *inode = filp->f_dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc; + int err = 0; + u64 len = PAGE_CACHE_SIZE; + + dout("readpage inode %p file %p page %p index %lu\n", + inode, filp, page, page->index); + err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout, + page->index << PAGE_CACHE_SHIFT, &len, + ci->i_truncate_seq, ci->i_truncate_size, + &page, 1); + if (err == -ENOENT) + err = 0; + if (err < 0) { + SetPageError(page); + goto out; + } else if (err < PAGE_CACHE_SIZE) { + /* zero fill remainder of page */ + zero_user_segment(page, err, PAGE_CACHE_SIZE); + } + SetPageUptodate(page); + +out: + return err < 0 ? err : 0; +} + +static int ceph_readpage(struct file *filp, struct page *page) +{ + int r = readpage_nounlock(filp, page); + unlock_page(page); + return r; +} + +/* + * Build a vector of contiguous pages from the provided page list. + */ +static struct page **page_vector_from_list(struct list_head *page_list, + unsigned *nr_pages) +{ + struct page **pages; + struct page *page; + int next_index, contig_pages = 0; + + /* build page vector */ + pages = kmalloc(sizeof(*pages) * *nr_pages, GFP_NOFS); + if (!pages) + return ERR_PTR(-ENOMEM); + + BUG_ON(list_empty(page_list)); + next_index = list_entry(page_list->prev, struct page, lru)->index; + list_for_each_entry_reverse(page, page_list, lru) { + if (page->index == next_index) { + dout("readpages page %d %p\n", contig_pages, page); + pages[contig_pages] = page; + contig_pages++; + next_index++; + } else { + break; + } + } + *nr_pages = contig_pages; + return pages; +} + +/* + * Read multiple pages. Leave pages we don't read + unlock in page_list; + * the caller (VM) cleans them up. 
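In the multi-page path below the per-page bookkeeping is a little non-obvious: rc, the byte count the OSD actually returned, is decremented by one page per loop iteration, so pages wholly inside rc are kept as read, the boundary page has its tail zeroed, and anything beyond is zero-filled. A worked example, assuming 4K pages:

    /*
     * ceph_readpages() asks for 4 pages (16384 bytes); the OSD returns
     * rc = 10000:
     *
     *   page 0: rc = 10000  -> fully populated          rc -= 4096
     *   page 1: rc =  5904  -> fully populated          rc -= 4096
     *   page 2: rc =  1808  -> bytes 1808..4095 zeroed  rc -= 4096
     *   page 3: rc = -2288  -> whole page zeroed
     *
     * An -ENOENT reply (no such object, i.e. a hole) is mapped to rc = 0,
     * so every page is simply zero-filled and still marked uptodate.
     */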
+ */ +static int ceph_readpages(struct file *file, struct address_space *mapping, + struct list_head *page_list, unsigned nr_pages) +{ + struct inode *inode = file->f_dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc; + int rc = 0; + struct page **pages; + struct pagevec pvec; + loff_t offset; + u64 len; + + dout("readpages %p file %p nr_pages %d\n", + inode, file, nr_pages); + + pages = page_vector_from_list(page_list, &nr_pages); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + /* guess read extent */ + offset = pages[0]->index << PAGE_CACHE_SHIFT; + len = nr_pages << PAGE_CACHE_SHIFT; + rc = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout, + offset, &len, + ci->i_truncate_seq, ci->i_truncate_size, + pages, nr_pages); + if (rc == -ENOENT) + rc = 0; + if (rc < 0) + goto out; + + /* set uptodate and add to lru in pagevec-sized chunks */ + pagevec_init(&pvec, 0); + for (; !list_empty(page_list) && len > 0; + rc -= PAGE_CACHE_SIZE, len -= PAGE_CACHE_SIZE) { + struct page *page = + list_entry(page_list->prev, struct page, lru); + + list_del(&page->lru); + + if (rc < (int)PAGE_CACHE_SIZE) { + /* zero (remainder of) page */ + int s = rc < 0 ? 0 : rc; + zero_user_segment(page, s, PAGE_CACHE_SIZE); + } + + if (add_to_page_cache(page, mapping, page->index, GFP_NOFS)) { + page_cache_release(page); + dout("readpages %p add_to_page_cache failed %p\n", + inode, page); + continue; + } + dout("readpages %p adding %p idx %lu\n", inode, page, + page->index); + flush_dcache_page(page); + SetPageUptodate(page); + unlock_page(page); + if (pagevec_add(&pvec, page) == 0) + pagevec_lru_add_file(&pvec); /* add to lru */ + } + pagevec_lru_add_file(&pvec); + rc = 0; + +out: + kfree(pages); + return rc; +} + +/* + * Get ref for the oldest snapc for an inode with dirty data... that is, the + * only snap context we are allowed to write back. + * + * Caller holds i_lock. + */ +static struct ceph_snap_context *__get_oldest_context(struct inode *inode, + u64 *snap_size) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_snap_context *snapc = NULL; + struct ceph_cap_snap *capsnap = NULL; + + list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { + dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap, + capsnap->context, capsnap->dirty_pages); + if (capsnap->dirty_pages) { + snapc = ceph_get_snap_context(capsnap->context); + if (snap_size) + *snap_size = capsnap->size; + break; + } + } + if (!snapc && ci->i_snap_realm) { + snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context); + dout(" head snapc %p has %d dirty pages\n", + snapc, ci->i_wrbuffer_ref_head); + } + return snapc; +} + +static struct ceph_snap_context *get_oldest_context(struct inode *inode, + u64 *snap_size) +{ + struct ceph_snap_context *snapc = NULL; + + spin_lock(&inode->i_lock); + snapc = __get_oldest_context(inode, snap_size); + spin_unlock(&inode->i_lock); + return snapc; +} + +/* + * Write a single page, but leave the page locked. + * + * If we get a write error, set the page error bit, but still adjust the + * dirty page accounting (i.e., page is no longer dirty). 
+ */ +static int writepage_nounlock(struct page *page, struct writeback_control *wbc) +{ + struct inode *inode; + struct ceph_inode_info *ci; + struct ceph_osd_client *osdc; + loff_t page_off = page->index << PAGE_CACHE_SHIFT; + int len = PAGE_CACHE_SIZE; + loff_t i_size; + int err = 0; + struct ceph_snap_context *snapc; + u64 snap_size = 0; + + dout("writepage %p idx %lu\n", page, page->index); + + if (!page->mapping || !page->mapping->host) { + dout("writepage %p - no mapping\n", page); + return -EFAULT; + } + inode = page->mapping->host; + ci = ceph_inode(inode); + osdc = &ceph_inode_to_client(inode)->osdc; + + /* verify this is a writeable snap context */ + snapc = (void *)page->private; + if (snapc == NULL) { + dout("writepage %p page %p not dirty?\n", inode, page); + goto out; + } + if (snapc != get_oldest_context(inode, &snap_size)) { + dout("writepage %p page %p snapc %p not writeable - noop\n", + inode, page, (void *)page->private); + /* we should only noop if called by kswapd */ + WARN_ON((current->flags & PF_MEMALLOC) == 0); + goto out; + } + + /* is this a partial page at end of file? */ + if (snap_size) + i_size = snap_size; + else + i_size = i_size_read(inode); + if (i_size < page_off + len) + len = i_size - page_off; + + dout("writepage %p page %p index %lu on %llu~%u\n", + inode, page, page->index, page_off, len); + + set_page_writeback(page); + err = ceph_osdc_writepages(osdc, ceph_vino(inode), + &ci->i_layout, snapc, + page_off, len, + ci->i_truncate_seq, ci->i_truncate_size, + &inode->i_mtime, + &page, 1, 0, 0, true); + if (err < 0) { + dout("writepage setting page/mapping error %d %p\n", err, page); + SetPageError(page); + mapping_set_error(&inode->i_data, err); + if (wbc) + wbc->pages_skipped++; + } else { + dout("writepage cleaned page %p\n", page); + err = 0; /* vfs expects us to return 0 */ + } + page->private = 0; + ClearPagePrivate(page); + end_page_writeback(page); + ceph_put_wrbuffer_cap_refs(ci, 1, snapc); + ceph_put_snap_context(snapc); +out: + return err; +} + +static int ceph_writepage(struct page *page, struct writeback_control *wbc) +{ + int err = writepage_nounlock(page, wbc); + unlock_page(page); + return err; +} + + +/* + * lame release_pages helper. release_pages() isn't exported to + * modules. + */ +static void ceph_release_pages(struct page **pages, int num) +{ + struct pagevec pvec; + int i; + + pagevec_init(&pvec, 0); + for (i = 0; i < num; i++) { + if (pagevec_add(&pvec, pages[i]) == 0) + pagevec_release(&pvec); + } + pagevec_release(&pvec); +} + + +/* + * async writeback completion handler. + * + * If we get an error, set the mapping error bit, but not the individual + * page error bits. 
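One detail of the completion handler below: the acknowledged byte count is converted back into a page count by a plain round-up to whole pages, with any sub-page start offset folded in. Assuming 4K pages (so ~PAGE_CACHE_MASK == 4095) and the page-aligned offsets this path always uses:

    /*
     *   wrote = (bytes + (offset & ~PAGE_CACHE_MASK) + ~PAGE_CACHE_MASK)
     *                   >> PAGE_CACHE_SHIFT;
     *
     * e.g. an ack of bytes = 10000 gives wrote = (10000 + 0 + 4095) >> 12
     * = 3 pages, and any request pages at index >= wrote are counted as
     * skipped for the writeback_control.
     */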
+ */ +static void writepages_finish(struct ceph_osd_request *req, + struct ceph_msg *msg) +{ + struct inode *inode = req->r_inode; + struct ceph_osd_reply_head *replyhead; + struct ceph_osd_op *op; + struct ceph_inode_info *ci = ceph_inode(inode); + unsigned wrote; + loff_t offset = req->r_pages[0]->index << PAGE_CACHE_SHIFT; + struct page *page; + int i; + struct ceph_snap_context *snapc = req->r_snapc; + struct address_space *mapping = inode->i_mapping; + struct writeback_control *wbc = req->r_wbc; + __s32 rc = -EIO; + u64 bytes = 0; + + /* parse reply */ + replyhead = msg->front.iov_base; + WARN_ON(le32_to_cpu(replyhead->num_ops) == 0); + op = (void *)(replyhead + 1); + rc = le32_to_cpu(replyhead->result); + bytes = le64_to_cpu(op->extent.length); + + if (rc >= 0) { + wrote = (bytes + (offset & ~PAGE_CACHE_MASK) + ~PAGE_CACHE_MASK) + >> PAGE_CACHE_SHIFT; + WARN_ON(wrote != req->r_num_pages); + } else { + wrote = 0; + mapping_set_error(mapping, rc); + } + dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n", + inode, rc, bytes, wrote); + + /* clean all pages */ + for (i = 0; i < req->r_num_pages; i++) { + page = req->r_pages[i]; + BUG_ON(!page); + WARN_ON(!PageUptodate(page)); + + if (i >= wrote) { + dout("inode %p skipping page %p\n", inode, page); + wbc->pages_skipped++; + } + page->private = 0; + ClearPagePrivate(page); + ceph_put_snap_context(snapc); + dout("unlocking %d %p\n", i, page); + end_page_writeback(page); + unlock_page(page); + } + dout("%p wrote+cleaned %d pages\n", inode, wrote); + ceph_put_wrbuffer_cap_refs(ci, req->r_num_pages, snapc); + + ceph_release_pages(req->r_pages, req->r_num_pages); + if (req->r_pages_from_pool) + mempool_free(req->r_pages, + ceph_client(inode->i_sb)->wb_pagevec_pool); + else + kfree(req->r_pages); + ceph_osdc_put_request(req); +} + +/* + * allocate a page vec, either directly, or if necessary, via a the + * mempool. we avoid the mempool if we can because req->r_num_pages + * may be less than the maximum write size. + */ +static void alloc_page_vec(struct ceph_client *client, + struct ceph_osd_request *req) +{ + req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages, + GFP_NOFS); + if (!req->r_pages) { + req->r_pages = mempool_alloc(client->wb_pagevec_pool, GFP_NOFS); + req->r_pages_from_pool = 1; + WARN_ON(!req->r_pages); + } +} + +/* + * initiate async writeback + */ +static int ceph_writepages_start(struct address_space *mapping, + struct writeback_control *wbc) +{ + struct inode *inode = mapping->host; + struct backing_dev_info *bdi = mapping->backing_dev_info; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_client *client = ceph_inode_to_client(inode); + pgoff_t index, start, end; + int range_whole = 0; + int should_loop = 1; + pgoff_t max_pages = 0, max_pages_ever = 0; + struct ceph_snap_context *snapc = NULL, *last_snapc = NULL; + struct pagevec pvec; + int done = 0; + int rc = 0; + unsigned wsize = 1 << inode->i_blkbits; + struct ceph_osd_request *req = NULL; + int do_sync; + u64 snap_size = 0; + + /* + * Include a 'sync' in the OSD request if this is a data + * integrity write (e.g., O_SYNC write or fsync()), or if our + * cap is being revoked. + */ + do_sync = wbc->sync_mode == WB_SYNC_ALL; + if (ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER)) + do_sync = 1; + dout("writepages_start %p dosync=%d (mode=%s)\n", + inode, do_sync, + wbc->sync_mode == WB_SYNC_NONE ? "NONE" : + (wbc->sync_mode == WB_SYNC_ALL ? 
"ALL" : "HOLD")); + + client = ceph_inode_to_client(inode); + if (client->mount_state == CEPH_MOUNT_SHUTDOWN) { + pr_warning("writepage_start %p on forced umount\n", inode); + return -EIO; /* we're in a forced umount, don't write! */ + } + if (client->mount_args.wsize && client->mount_args.wsize < wsize) + wsize = client->mount_args.wsize; + if (wsize < PAGE_CACHE_SIZE) + wsize = PAGE_CACHE_SIZE; + max_pages_ever = wsize >> PAGE_CACHE_SHIFT; + + pagevec_init(&pvec, 0); + + /* ?? */ + if (wbc->nonblocking && bdi_write_congested(bdi)) { + dout(" writepages congested\n"); + wbc->encountered_congestion = 1; + goto out_final; + } + + /* where to start/end? */ + if (wbc->range_cyclic) { + start = mapping->writeback_index; /* Start from prev offset */ + end = -1; + dout(" cyclic, start at %lu\n", start); + } else { + start = wbc->range_start >> PAGE_CACHE_SHIFT; + end = wbc->range_end >> PAGE_CACHE_SHIFT; + if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) + range_whole = 1; + should_loop = 0; + dout(" not cyclic, %lu to %lu\n", start, end); + } + index = start; + +retry: + /* find oldest snap context with dirty data */ + ceph_put_snap_context(snapc); + snapc = get_oldest_context(inode, &snap_size); + if (!snapc) { + /* hmm, why does writepages get called when there + is no dirty data? */ + dout(" no snap context with dirty data?\n"); + goto out; + } + dout(" oldest snapc is %p seq %lld (%d snaps)\n", + snapc, snapc->seq, snapc->num_snaps); + if (last_snapc && snapc != last_snapc) { + /* if we switched to a newer snapc, restart our scan at the + * start of the original file range. */ + dout(" snapc differs from last pass, restarting at %lu\n", + index); + index = start; + } + last_snapc = snapc; + + while (!done && index <= end) { + unsigned i; + int first; + pgoff_t next; + int pvec_pages, locked_pages; + struct page *page; + int want; + u64 offset, len; + struct ceph_osd_request_head *reqhead; + struct ceph_osd_op *op; + + next = 0; + locked_pages = 0; + max_pages = max_pages_ever; + +get_more_pages: + first = -1; + want = min(end - index, + min((pgoff_t)PAGEVEC_SIZE, + max_pages - (pgoff_t)locked_pages) - 1) + + 1; + pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index, + PAGECACHE_TAG_DIRTY, + want); + dout("pagevec_lookup_tag got %d\n", pvec_pages); + if (!pvec_pages && !locked_pages) + break; + for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) { + page = pvec.pages[i]; + dout("? %p idx %lu\n", page, page->index); + if (locked_pages == 0) + lock_page(page); /* first page */ + else if (!trylock_page(page)) + break; + + /* only dirty pages, or our accounting breaks */ + if (unlikely(!PageDirty(page)) || + unlikely(page->mapping != mapping)) { + dout("!dirty or !mapping %p\n", page); + unlock_page(page); + break; + } + if (!wbc->range_cyclic && page->index > end) { + dout("end of range %p\n", page); + done = 1; + unlock_page(page); + break; + } + if (next && (page->index != next)) { + dout("not consecutive %p\n", page); + unlock_page(page); + break; + } + if (wbc->sync_mode != WB_SYNC_NONE) { + dout("waiting on writeback %p\n", page); + wait_on_page_writeback(page); + } + if ((snap_size && page_offset(page) > snap_size) || + (!snap_size && + page_offset(page) > i_size_read(inode))) { + dout("%p page eof %llu\n", page, snap_size ? 
+ snap_size : i_size_read(inode)); + done = 1; + unlock_page(page); + break; + } + if (PageWriteback(page)) { + dout("%p under writeback\n", page); + unlock_page(page); + break; + } + + /* only if matching snap context */ + if (snapc != (void *)page->private) { + dout("page snapc %p != oldest %p\n", + (void *)page->private, snapc); + unlock_page(page); + if (!locked_pages) + continue; /* keep looking for snap */ + break; + } + + if (!clear_page_dirty_for_io(page)) { + dout("%p !clear_page_dirty_for_io\n", page); + unlock_page(page); + break; + } + + /* ok */ + if (locked_pages == 0) { + /* prepare async write request */ + offset = page->index << PAGE_CACHE_SHIFT; + len = wsize; + req = ceph_osdc_new_request(&client->osdc, + &ci->i_layout, + ceph_vino(inode), + offset, &len, + CEPH_OSD_OP_WRITE, + CEPH_OSD_FLAG_WRITE | + CEPH_OSD_FLAG_ONDISK, + snapc, do_sync, + ci->i_truncate_seq, + ci->i_truncate_size, + &inode->i_mtime, true, 1); + max_pages = req->r_num_pages; + + alloc_page_vec(client, req); + req->r_callback = writepages_finish; + req->r_inode = inode; + req->r_wbc = wbc; + } + + /* note position of first page in pvec */ + if (first < 0) + first = i; + dout("%p will write page %p idx %lu\n", + inode, page, page->index); + set_page_writeback(page); + req->r_pages[locked_pages] = page; + locked_pages++; + next = page->index + 1; + } + + /* did we get anything? */ + if (!locked_pages) + goto release_pvec_pages; + if (i) { + int j; + BUG_ON(!locked_pages || first < 0); + + if (pvec_pages && i == pvec_pages && + locked_pages < max_pages) { + dout("reached end pvec, trying for more\n"); + pagevec_reinit(&pvec); + goto get_more_pages; + } + + /* shift unused pages over in the pvec... we + * will need to release them below. */ + for (j = i; j < pvec_pages; j++) { + dout(" pvec leftover page %p\n", + pvec.pages[j]); + pvec.pages[j-i+first] = pvec.pages[j]; + } + pvec.nr -= i-first; + } + + /* submit the write */ + offset = req->r_pages[0]->index << PAGE_CACHE_SHIFT; + len = min((snap_size ? snap_size : i_size_read(inode)) - offset, + (u64)locked_pages << PAGE_CACHE_SHIFT); + dout("writepages got %d pages at %llu~%llu\n", + locked_pages, offset, len); + + /* revise final length, page count */ + req->r_num_pages = locked_pages; + reqhead = req->r_request->front.iov_base; + op = (void *)(reqhead + 1); + op->extent.length = cpu_to_le64(len); + op->payload_len = cpu_to_le32(len); + req->r_request->hdr.data_len = cpu_to_le32(len); + + ceph_osdc_start_request(&client->osdc, req, true); + req = NULL; + + /* continue? */ + index = next; + wbc->nr_to_write -= locked_pages; + if (wbc->nr_to_write <= 0) + done = 1; + +release_pvec_pages: + dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr, + pvec.nr ? pvec.pages[0] : NULL); + pagevec_release(&pvec); + + if (locked_pages && !done) + goto retry; + } + + if (should_loop && !done) { + /* more to do; loop back to beginning of file */ + dout("writepages looping back to beginning of file\n"); + should_loop = 0; + index = 0; + goto retry; + } + + if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) + mapping->writeback_index = index; + +out: + if (req) + ceph_osdc_put_request(req); + if (rc > 0) + rc = 0; /* vfs expects us to return 0 */ + ceph_put_snap_context(snapc); + dout("writepages done, rc = %d\n", rc); +out_final: + return rc; +} + + + +/* + * See if a given @snapc is either writeable, or already written. 
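The seq comparison in the helper below is worth unpacking: a snap context is "writeable or written" when no strictly older context still has dirty data. With illustrative seq numbers:

    /*
     * oldest snapc with dirty data has seq 7:
     *   our snapc seq 7 -> it is the oldest, so it may be written now
     *   our snapc seq 5 -> older than anything still dirty, so its pages
     *                      must already have been written back
     *   our snapc seq 9 -> newer; seq 7 has to be flushed first, so wait
     * no snapc with dirty data at all -> trivially writeable
     */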
+ */ +static int context_is_writeable_or_written(struct inode *inode, + struct ceph_snap_context *snapc) +{ + struct ceph_snap_context *oldest = get_oldest_context(inode, NULL); + return !oldest || snapc->seq <= oldest->seq; +} + +/* + * We are only allowed to write into/dirty the page if the page is + * clean, or already dirty within the same snap context. + */ +static int ceph_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) +{ + struct inode *inode = file->f_dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; + struct page *page; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + loff_t page_off = pos & PAGE_CACHE_MASK; + int pos_in_page = pos & ~PAGE_CACHE_MASK; + int end_in_page = pos_in_page + len; + loff_t i_size; + struct ceph_snap_context *snapc; + int r; + + /* get a page*/ +retry: + page = grab_cache_page_write_begin(mapping, index, 0); + if (!page) + return -ENOMEM; + *pagep = page; + + dout("write_begin file %p inode %p page %p %d~%d\n", file, + inode, page, (int)pos, (int)len); + +retry_locked: + /* writepages currently holds page lock, but if we change that later, */ + wait_on_page_writeback(page); + + /* check snap context */ + BUG_ON(!ci->i_snap_realm); + down_read(&mdsc->snap_rwsem); + BUG_ON(!ci->i_snap_realm->cached_context); + if (page->private && + (void *)page->private != ci->i_snap_realm->cached_context) { + /* + * this page is already dirty in another (older) snap + * context! is it writeable now? + */ + snapc = get_oldest_context(inode, NULL); + up_read(&mdsc->snap_rwsem); + + if (snapc != (void *)page->private) { + dout(" page %p snapc %p not current or oldest\n", + page, (void *)page->private); + /* + * queue for writeback, and wait for snapc to + * be writeable or written + */ + snapc = ceph_get_snap_context((void *)page->private); + unlock_page(page); + if (ceph_queue_writeback(inode)) + igrab(inode); + wait_event_interruptible(ci->i_cap_wq, + context_is_writeable_or_written(inode, snapc)); + ceph_put_snap_context(snapc); + goto retry; + } + + /* yay, writeable, do it now (without dropping page lock) */ + dout(" page %p snapc %p not current, but oldest\n", + page, snapc); + if (!clear_page_dirty_for_io(page)) + goto retry_locked; + r = writepage_nounlock(page, NULL); + if (r < 0) + goto fail_nosnap; + goto retry_locked; + } + + if (PageUptodate(page)) { + dout(" page %p already uptodate\n", page); + return 0; + } + + /* full page? */ + if (pos_in_page == 0 && len == PAGE_CACHE_SIZE) + return 0; + + /* past end of file? */ + i_size = inode->i_size; /* caller holds i_mutex */ + + if (i_size + len > inode->i_sb->s_maxbytes) { + /* file is too big */ + r = -EINVAL; + goto fail; + } + + if (page_off >= i_size || + (pos_in_page == 0 && (pos+len) >= i_size && + end_in_page - pos_in_page != PAGE_CACHE_SIZE)) { + dout(" zeroing %p 0 - %d and %d - %d\n", + page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE); + zero_user_segments(page, + 0, pos_in_page, + end_in_page, PAGE_CACHE_SIZE); + return 0; + } + + /* we need to read it. */ + up_read(&mdsc->snap_rwsem); + r = readpage_nounlock(file, page); + if (r < 0) + goto fail_nosnap; + goto retry_locked; + +fail: + up_read(&mdsc->snap_rwsem); +fail_nosnap: + unlock_page(page); + return r; +} + +/* + * we don't do anything in here that simple_write_end doesn't do + * except adjust dirty page accounting and drop read lock on + * mdsc->snap_rwsem. 
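To summarize the snap-context cases ceph_write_begin() above has to handle (a sketch of its control flow, not additional behaviour):

    /*
     * page clean, or dirty in the current (head) context:
     *     -> use it directly (reading or zero-filling it first if needed)
     *
     * page dirty in an older context that IS the oldest with dirty data:
     *     -> it is writeable now: clear_page_dirty_for_io() and flush it
     *        synchronously with writepage_nounlock(), then retry
     *
     * page dirty in an older context that is NOT the oldest:
     *     -> queue writeback, drop the page lock, wait for that context
     *        to become writeable or written, and start over
     */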
+ */ +static int ceph_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) +{ + struct inode *inode = file->f_dentry->d_inode; + struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; + unsigned from = pos & (PAGE_CACHE_SIZE - 1); + int check_cap = 0; + + dout("write_end file %p inode %p page %p %d~%d (%d)\n", file, + inode, page, (int)pos, (int)copied, (int)len); + + /* zero the stale part of the page if we did a short copy */ + if (copied < len) + zero_user_segment(page, from+copied, len); + + /* did file size increase? */ + /* (no need for i_size_read(); we caller holds i_mutex */ + if (pos+copied > inode->i_size) + check_cap = ceph_inode_set_size(inode, pos+copied); + + if (!PageUptodate(page)) + SetPageUptodate(page); + + set_page_dirty(page); + + unlock_page(page); + up_read(&mdsc->snap_rwsem); + page_cache_release(page); + + if (check_cap) + ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); + + return copied; +} + +/* + * we set .direct_IO to indicate direct io is supported, but since we + * intercept O_DIRECT reads and writes early, this function should + * never get called. + */ +static ssize_t ceph_direct_io(int rw, struct kiocb *iocb, + const struct iovec *iov, + loff_t pos, unsigned long nr_segs) +{ + WARN_ON(1); + return -EINVAL; +} + +const struct address_space_operations ceph_aops = { + .readpage = ceph_readpage, + .readpages = ceph_readpages, + .writepage = ceph_writepage, + .writepages = ceph_writepages_start, + .write_begin = ceph_write_begin, + .write_end = ceph_write_end, + .set_page_dirty = ceph_set_page_dirty, + .invalidatepage = ceph_invalidatepage, + .releasepage = ceph_releasepage, + .direct_IO = ceph_direct_io, +}; + + +/* + * vm ops + */ + +/* + * Reuse write_begin here for simplicity. + */ +static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct inode *inode = vma->vm_file->f_dentry->d_inode; + struct page *page = vmf->page; + struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; + loff_t off = page->index << PAGE_CACHE_SHIFT; + loff_t size, len; + struct page *locked_page = NULL; + void *fsdata = NULL; + int ret; + + size = i_size_read(inode); + if (off + PAGE_CACHE_SIZE <= size) + len = PAGE_CACHE_SIZE; + else + len = size & ~PAGE_CACHE_MASK; + + dout("page_mkwrite %p %llu~%llu page %p idx %lu\n", inode, + off, len, page, page->index); + ret = ceph_write_begin(vma->vm_file, inode->i_mapping, off, len, 0, + &locked_page, &fsdata); + WARN_ON(page != locked_page); + if (!ret) { + /* + * doing the following, instead of calling + * ceph_write_end. 
Note that we keep the + * page locked + */ + set_page_dirty(page); + up_read(&mdsc->snap_rwsem); + page_cache_release(page); + ret = VM_FAULT_LOCKED; + } else { + ret = VM_FAULT_SIGBUS; + } + dout("page_mkwrite %p %llu~%llu = %d\n", inode, off, len, ret); + return ret; +} + +static struct vm_operations_struct ceph_vmops = { + .fault = filemap_fault, + .page_mkwrite = ceph_page_mkwrite, +}; + +int ceph_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct address_space *mapping = file->f_mapping; + + if (!mapping->a_ops->readpage) + return -ENOEXEC; + file_accessed(file); + vma->vm_ops = &ceph_vmops; + vma->vm_flags |= VM_CAN_NONLINEAR; + return 0; +} -- cgit v1.2.3 From 2f2dc053404febedc9c273452d9d518fb31fde72 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:09 -0700 Subject: ceph: MDS client The MDS (metadata server) client is responsible for submitting requests to the MDS cluster and parsing the response. We decide which MDS to submit each request to based on cached information about the current partition of the directory hierarchy across the cluster. A stateful session is opened with each MDS before we submit requests to it, and a mutex is used to control the ordering of messages within each session. An MDS request may generate two responses. The first indicates the operation was a success and returns any result. A second reply is sent when the operation commits to disk. Note that locking on the MDS ensures that the results of updates are visible only to the updating client before the operation commits. Requests are linked to the containing directory so that an fsync will wait for them to commit. If an MDS fails and/or recovers, we resubmit requests as needed. We also reconnect existing capabilities to a recovering MDS to reestablish that shared session state. Old dentry leases are invalidated. Signed-off-by: Sage Weil --- fs/ceph/mds_client.c | 2912 ++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/mds_client.h | 321 ++++++ fs/ceph/mdsmap.c | 166 +++ fs/ceph/mdsmap.h | 53 + 4 files changed, 3452 insertions(+) create mode 100644 fs/ceph/mds_client.c create mode 100644 fs/ceph/mds_client.h create mode 100644 fs/ceph/mdsmap.c create mode 100644 fs/ceph/mdsmap.h diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c new file mode 100644 index 000000000000..de8ba4a242ca --- /dev/null +++ b/fs/ceph/mds_client.c @@ -0,0 +1,2912 @@ +#include "ceph_debug.h" + +#include +#include + +#include "mds_client.h" +#include "mon_client.h" +#include "super.h" +#include "messenger.h" +#include "decode.h" + +/* + * A cluster of MDS (metadata server) daemons is responsible for + * managing the file system namespace (the directory hierarchy and + * inodes) and for coordinating shared access to storage. Metadata is + * partitioning hierarchically across a number of servers, and that + * partition varies over time as the cluster adjusts the distribution + * in order to balance load. + * + * The MDS client is primarily responsible to managing synchronous + * metadata requests for operations like open, unlink, and so forth. + * If there is a MDS failure, we find out about it when we (possibly + * request and) receive a new MDS map, and can resubmit affected + * requests. + * + * For the most part, though, we take advantage of a lossless + * communications channel to the MDS, and do not need to worry about + * timing out or resubmitting requests. + * + * We maintain a stateful "session" with each MDS we interact with. 
+ * Within each session, we sent periodic heartbeat messages to ensure + * any capabilities or leases we have been issues remain valid. If + * the session times out and goes stale, our leases and capabilities + * are no longer valid. + */ + +static void __wake_requests(struct ceph_mds_client *mdsc, + struct list_head *head); + +const static struct ceph_connection_operations mds_con_ops; + + +/* + * mds reply parsing + */ + +/* + * parse individual inode info + */ +static int parse_reply_info_in(void **p, void *end, + struct ceph_mds_reply_info_in *info) +{ + int err = -EIO; + + info->in = *p; + *p += sizeof(struct ceph_mds_reply_inode) + + sizeof(*info->in->fragtree.splits) * + le32_to_cpu(info->in->fragtree.nsplits); + + ceph_decode_32_safe(p, end, info->symlink_len, bad); + ceph_decode_need(p, end, info->symlink_len, bad); + info->symlink = *p; + *p += info->symlink_len; + + ceph_decode_32_safe(p, end, info->xattr_len, bad); + ceph_decode_need(p, end, info->xattr_len, bad); + info->xattr_data = *p; + *p += info->xattr_len; + return 0; +bad: + return err; +} + +/* + * parse a normal reply, which may contain a (dir+)dentry and/or a + * target inode. + */ +static int parse_reply_info_trace(void **p, void *end, + struct ceph_mds_reply_info_parsed *info) +{ + int err; + + if (info->head->is_dentry) { + err = parse_reply_info_in(p, end, &info->diri); + if (err < 0) + goto out_bad; + + if (unlikely(*p + sizeof(*info->dirfrag) > end)) + goto bad; + info->dirfrag = *p; + *p += sizeof(*info->dirfrag) + + sizeof(u32)*le32_to_cpu(info->dirfrag->ndist); + if (unlikely(*p > end)) + goto bad; + + ceph_decode_32_safe(p, end, info->dname_len, bad); + ceph_decode_need(p, end, info->dname_len, bad); + info->dname = *p; + *p += info->dname_len; + info->dlease = *p; + *p += sizeof(*info->dlease); + } + + if (info->head->is_target) { + err = parse_reply_info_in(p, end, &info->targeti); + if (err < 0) + goto out_bad; + } + + if (unlikely(*p != end)) + goto bad; + return 0; + +bad: + err = -EIO; +out_bad: + pr_err("problem parsing mds trace %d\n", err); + return err; +} + +/* + * parse readdir results + */ +static int parse_reply_info_dir(void **p, void *end, + struct ceph_mds_reply_info_parsed *info) +{ + u32 num, i = 0; + int err; + + info->dir_dir = *p; + if (*p + sizeof(*info->dir_dir) > end) + goto bad; + *p += sizeof(*info->dir_dir) + + sizeof(u32)*le32_to_cpu(info->dir_dir->ndist); + if (*p > end) + goto bad; + + ceph_decode_need(p, end, sizeof(num) + 2, bad); + ceph_decode_32(p, num); + ceph_decode_8(p, info->dir_end); + ceph_decode_8(p, info->dir_complete); + if (num == 0) + goto done; + + /* alloc large array */ + info->dir_nr = num; + info->dir_in = kcalloc(num, sizeof(*info->dir_in) + + sizeof(*info->dir_dname) + + sizeof(*info->dir_dname_len) + + sizeof(*info->dir_dlease), + GFP_NOFS); + if (info->dir_in == NULL) { + err = -ENOMEM; + goto out_bad; + } + info->dir_dname = (void *)(info->dir_in + num); + info->dir_dname_len = (void *)(info->dir_dname + num); + info->dir_dlease = (void *)(info->dir_dname_len + num); + + while (num) { + /* dentry */ + ceph_decode_need(p, end, sizeof(u32)*2, bad); + ceph_decode_32(p, info->dir_dname_len[i]); + ceph_decode_need(p, end, info->dir_dname_len[i], bad); + info->dir_dname[i] = *p; + *p += info->dir_dname_len[i]; + dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i], + info->dir_dname[i]); + info->dir_dlease[i] = *p; + *p += sizeof(struct ceph_mds_reply_lease); + + /* inode */ + err = parse_reply_info_in(p, end, &info->dir_in[i]); + if (err < 0) + goto 
out_bad; + i++; + num--; + } + +done: + if (*p != end) + goto bad; + return 0; + +bad: + err = -EIO; +out_bad: + pr_err("problem parsing dir contents %d\n", err); + return err; +} + +/* + * parse entire mds reply + */ +static int parse_reply_info(struct ceph_msg *msg, + struct ceph_mds_reply_info_parsed *info) +{ + void *p, *end; + u32 len; + int err; + + info->head = msg->front.iov_base; + p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head); + end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head); + + /* trace */ + ceph_decode_32_safe(&p, end, len, bad); + if (len > 0) { + err = parse_reply_info_trace(&p, p+len, info); + if (err < 0) + goto out_bad; + } + + /* dir content */ + ceph_decode_32_safe(&p, end, len, bad); + if (len > 0) { + err = parse_reply_info_dir(&p, p+len, info); + if (err < 0) + goto out_bad; + } + + /* snap blob */ + ceph_decode_32_safe(&p, end, len, bad); + info->snapblob_len = len; + info->snapblob = p; + p += len; + + if (p != end) + goto bad; + return 0; + +bad: + err = -EIO; +out_bad: + pr_err("mds parse_reply err %d\n", err); + return err; +} + +static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info) +{ + kfree(info->dir_in); +} + + +/* + * sessions + */ +static const char *session_state_name(int s) +{ + switch (s) { + case CEPH_MDS_SESSION_NEW: return "new"; + case CEPH_MDS_SESSION_OPENING: return "opening"; + case CEPH_MDS_SESSION_OPEN: return "open"; + case CEPH_MDS_SESSION_HUNG: return "hung"; + case CEPH_MDS_SESSION_CLOSING: return "closing"; + case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting"; + default: return "???"; + } +} + +static struct ceph_mds_session *get_session(struct ceph_mds_session *s) +{ + if (atomic_inc_not_zero(&s->s_ref)) { + dout("mdsc get_session %p %d -> %d\n", s, + atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref)); + return s; + } else { + dout("mdsc get_session %p 0 -- FAIL", s); + return NULL; + } +} + +void ceph_put_mds_session(struct ceph_mds_session *s) +{ + dout("mdsc put_session %p %d -> %d\n", s, + atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); + if (atomic_dec_and_test(&s->s_ref)) { + ceph_con_shutdown(&s->s_con); + kfree(s); + } +} + +/* + * called under mdsc->mutex + */ +struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc, + int mds) +{ + struct ceph_mds_session *session; + + if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL) + return NULL; + session = mdsc->sessions[mds]; + dout("lookup_mds_session %p %d\n", session, + atomic_read(&session->s_ref)); + get_session(session); + return session; +} + +static bool __have_session(struct ceph_mds_client *mdsc, int mds) +{ + if (mds >= mdsc->max_sessions) + return false; + return mdsc->sessions[mds]; +} + +/* + * create+register a new session for given mds. + * called under mdsc->mutex. 
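The session array managed by register_session() below grows in powers of two, so registering a session for a high mds rank reallocates once rather than once per rank. A rough example (get_count_order() picks the order of the next power of two):

    /*
     * first session is for mds5, max_sessions is still 0:
     *   newmax = 1 << get_count_order(5 + 1) = 8
     * -> allocate an 8-entry pointer array, copy over any existing
     *    entries, free the old array; mds0..mds7 now fit without another
     *    reallocation.
     */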
+ */ +static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, + int mds) +{ + struct ceph_mds_session *s; + + s = kzalloc(sizeof(*s), GFP_NOFS); + s->s_mdsc = mdsc; + s->s_mds = mds; + s->s_state = CEPH_MDS_SESSION_NEW; + s->s_ttl = 0; + s->s_seq = 0; + mutex_init(&s->s_mutex); + + ceph_con_init(mdsc->client->msgr, &s->s_con); + s->s_con.private = s; + s->s_con.ops = &mds_con_ops; + s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS; + s->s_con.peer_name.num = cpu_to_le64(mds); + ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); + + spin_lock_init(&s->s_cap_lock); + s->s_cap_gen = 0; + s->s_cap_ttl = 0; + s->s_renew_requested = 0; + s->s_renew_seq = 0; + INIT_LIST_HEAD(&s->s_caps); + s->s_nr_caps = 0; + atomic_set(&s->s_ref, 1); + INIT_LIST_HEAD(&s->s_waiting); + INIT_LIST_HEAD(&s->s_unsafe); + s->s_num_cap_releases = 0; + INIT_LIST_HEAD(&s->s_cap_releases); + INIT_LIST_HEAD(&s->s_cap_releases_done); + INIT_LIST_HEAD(&s->s_cap_flushing); + INIT_LIST_HEAD(&s->s_cap_snaps_flushing); + + dout("register_session mds%d\n", mds); + if (mds >= mdsc->max_sessions) { + int newmax = 1 << get_count_order(mds+1); + struct ceph_mds_session **sa; + + dout("register_session realloc to %d\n", newmax); + sa = kcalloc(newmax, sizeof(void *), GFP_NOFS); + if (sa == NULL) + return ERR_PTR(-ENOMEM); + if (mdsc->sessions) { + memcpy(sa, mdsc->sessions, + mdsc->max_sessions * sizeof(void *)); + kfree(mdsc->sessions); + } + mdsc->sessions = sa; + mdsc->max_sessions = newmax; + } + mdsc->sessions[mds] = s; + atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */ + return s; +} + +/* + * called under mdsc->mutex + */ +static void unregister_session(struct ceph_mds_client *mdsc, int mds) +{ + dout("unregister_session mds%d %p\n", mds, mdsc->sessions[mds]); + ceph_put_mds_session(mdsc->sessions[mds]); + mdsc->sessions[mds] = NULL; +} + +/* + * drop session refs in request. + * + * should be last request ref, or hold mdsc->mutex + */ +static void put_request_session(struct ceph_mds_request *req) +{ + if (req->r_session) { + ceph_put_mds_session(req->r_session); + req->r_session = NULL; + } +} + +void ceph_mdsc_put_request(struct ceph_mds_request *req) +{ + dout("mdsc put_request %p %d -> %d\n", req, + atomic_read(&req->r_ref), atomic_read(&req->r_ref)-1); + if (atomic_dec_and_test(&req->r_ref)) { + if (req->r_request) + ceph_msg_put(req->r_request); + if (req->r_reply) { + ceph_msg_put(req->r_reply); + destroy_reply_info(&req->r_reply_info); + } + if (req->r_inode) { + ceph_put_cap_refs(ceph_inode(req->r_inode), + CEPH_CAP_PIN); + iput(req->r_inode); + } + if (req->r_locked_dir) + ceph_put_cap_refs(ceph_inode(req->r_locked_dir), + CEPH_CAP_PIN); + if (req->r_target_inode) + iput(req->r_target_inode); + if (req->r_dentry) + dput(req->r_dentry); + if (req->r_old_dentry) { + ceph_put_cap_refs( + ceph_inode(req->r_old_dentry->d_parent->d_inode), + CEPH_CAP_PIN); + dput(req->r_old_dentry); + } + kfree(req->r_path1); + kfree(req->r_path2); + put_request_session(req); + ceph_unreserve_caps(&req->r_caps_reservation); + kfree(req); + } +} + +/* + * lookup session, bump ref if found. + * + * called under mdsc->mutex. + */ +static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc, + u64 tid) +{ + struct ceph_mds_request *req; + req = radix_tree_lookup(&mdsc->request_tree, tid); + if (req) + ceph_mdsc_get_request(req); + return req; +} + +/* + * Register an in-flight request, and assign a tid. Link to directory + * are modifying (if any). 
+ * + * Called under mdsc->mutex. + */ +static void __register_request(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req, + struct inode *dir) +{ + req->r_tid = ++mdsc->last_tid; + if (req->r_num_caps) + ceph_reserve_caps(&req->r_caps_reservation, req->r_num_caps); + dout("__register_request %p tid %lld\n", req, req->r_tid); + ceph_mdsc_get_request(req); + radix_tree_insert(&mdsc->request_tree, req->r_tid, (void *)req); + + if (dir) { + struct ceph_inode_info *ci = ceph_inode(dir); + + spin_lock(&ci->i_unsafe_lock); + req->r_unsafe_dir = dir; + list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops); + spin_unlock(&ci->i_unsafe_lock); + } +} + +static void __unregister_request(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req) +{ + dout("__unregister_request %p tid %lld\n", req, req->r_tid); + radix_tree_delete(&mdsc->request_tree, req->r_tid); + ceph_mdsc_put_request(req); + + if (req->r_unsafe_dir) { + struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir); + + spin_lock(&ci->i_unsafe_lock); + list_del_init(&req->r_unsafe_dir_item); + spin_unlock(&ci->i_unsafe_lock); + } +} + +/* + * Choose mds to send request to next. If there is a hint set in the + * request (e.g., due to a prior forward hint from the mds), use that. + * Otherwise, consult frag tree and/or caps to identify the + * appropriate mds. If all else fails, choose randomly. + * + * Called under mdsc->mutex. + */ +static int __choose_mds(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req) +{ + struct inode *inode; + struct ceph_inode_info *ci; + struct ceph_cap *cap; + int mode = req->r_direct_mode; + int mds = -1; + u32 hash = req->r_direct_hash; + bool is_hash = req->r_direct_is_hash; + + /* + * is there a specific mds we should try? ignore hint if we have + * no session and the mds is not up (active or recovering). + */ + if (req->r_resend_mds >= 0 && + (__have_session(mdsc, req->r_resend_mds) || + ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { + dout("choose_mds using resend_mds mds%d\n", + req->r_resend_mds); + return req->r_resend_mds; + } + + if (mode == USE_RANDOM_MDS) + goto random; + + inode = NULL; + if (req->r_inode) { + inode = req->r_inode; + } else if (req->r_dentry) { + if (req->r_dentry->d_inode) { + inode = req->r_dentry->d_inode; + } else { + inode = req->r_dentry->d_parent->d_inode; + hash = req->r_dentry->d_name.hash; + is_hash = true; + } + } + dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash, + (int)hash, mode); + if (!inode) + goto random; + ci = ceph_inode(inode); + + if (is_hash && S_ISDIR(inode->i_mode)) { + struct ceph_inode_frag frag; + int found; + + ceph_choose_frag(ci, hash, &frag, &found); + if (found) { + if (mode == USE_ANY_MDS && frag.ndist > 0) { + u8 r; + + /* choose a random replica */ + get_random_bytes(&r, 1); + r %= frag.ndist; + mds = frag.dist[r]; + dout("choose_mds %p %llx.%llx " + "frag %u mds%d (%d/%d)\n", + inode, ceph_vinop(inode), + frag.frag, frag.mds, + (int)r, frag.ndist); + return mds; + } + + /* since this file/dir wasn't known to be + * replicated, then we want to look for the + * authoritative mds. 
*/ + mode = USE_AUTH_MDS; + if (frag.mds >= 0) { + /* choose auth mds */ + mds = frag.mds; + dout("choose_mds %p %llx.%llx " + "frag %u mds%d (auth)\n", + inode, ceph_vinop(inode), frag.frag, mds); + return mds; + } + } + } + + spin_lock(&inode->i_lock); + cap = NULL; + if (mode == USE_AUTH_MDS) + cap = ci->i_auth_cap; + if (!cap && !RB_EMPTY_ROOT(&ci->i_caps)) + cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node); + if (!cap) { + spin_unlock(&inode->i_lock); + goto random; + } + mds = cap->session->s_mds; + dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n", + inode, ceph_vinop(inode), mds, + cap == ci->i_auth_cap ? "auth " : "", cap); + spin_unlock(&inode->i_lock); + return mds; + +random: + mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap); + dout("choose_mds chose random mds%d\n", mds); + return mds; +} + + +/* + * session messages + */ +static struct ceph_msg *create_session_msg(u32 op, u64 seq) +{ + struct ceph_msg *msg; + struct ceph_mds_session_head *h; + + msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), 0, 0, NULL); + if (IS_ERR(msg)) { + pr_err("create_session_msg ENOMEM creating msg\n"); + return ERR_PTR(PTR_ERR(msg)); + } + h = msg->front.iov_base; + h->op = cpu_to_le32(op); + h->seq = cpu_to_le64(seq); + return msg; +} + +/* + * send session open request. + * + * called under mdsc->mutex + */ +static int __open_session(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session) +{ + struct ceph_msg *msg; + int mstate; + int mds = session->s_mds; + int err = 0; + + /* wait for mds to go active? */ + mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds); + dout("open_session to mds%d (%s)\n", mds, + ceph_mds_state_name(mstate)); + session->s_state = CEPH_MDS_SESSION_OPENING; + session->s_renew_requested = jiffies; + + /* send connect message */ + msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto out; + } + ceph_con_send(&session->s_con, msg); + +out: + return 0; +} + +/* + * session caps + */ + +/* + * Free preallocated cap messages assigned to this session + */ +static void cleanup_cap_releases(struct ceph_mds_session *session) +{ + struct ceph_msg *msg; + + spin_lock(&session->s_cap_lock); + while (!list_empty(&session->s_cap_releases)) { + msg = list_first_entry(&session->s_cap_releases, + struct ceph_msg, list_head); + list_del_init(&msg->list_head); + ceph_msg_put(msg); + } + while (!list_empty(&session->s_cap_releases_done)) { + msg = list_first_entry(&session->s_cap_releases_done, + struct ceph_msg, list_head); + list_del_init(&msg->list_head); + ceph_msg_put(msg); + } + spin_unlock(&session->s_cap_lock); +} + +/* + * Helper to safely iterate over all caps associated with a session. 
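Stepping back, the target selection in __choose_mds() above boils down to a short preference order (paraphrased from the code, not additional behaviour):

    /*
     * 1. an explicit resend/forward hint (r_resend_mds), if we have a
     *    session with that mds or it is at least up;
     * 2. USE_RANDOM_MDS -> pick a random mds;
     * 3. otherwise find a relevant inode: r_inode, the dentry's inode, or
     *    the parent directory plus the name hash for a negative dentry;
     * 4. for a hashed name in a directory, consult the frag tree: with
     *    USE_ANY_MDS and known replicas pick a random replica, otherwise
     *    use the frag's authoritative mds if known;
     * 5. otherwise use the session holding the auth cap (USE_AUTH_MDS) or
     *    any cap on the inode;
     * 6. failing all of that, a random mds from the mdsmap.
     */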
+ * + * caller must hold session s_mutex + */ +static int iterate_session_caps(struct ceph_mds_session *session, + int (*cb)(struct inode *, struct ceph_cap *, + void *), void *arg) +{ + struct ceph_cap *cap, *ncap; + struct inode *inode; + int ret; + + dout("iterate_session_caps %p mds%d\n", session, session->s_mds); + spin_lock(&session->s_cap_lock); + list_for_each_entry_safe(cap, ncap, &session->s_caps, session_caps) { + inode = igrab(&cap->ci->vfs_inode); + if (!inode) + continue; + spin_unlock(&session->s_cap_lock); + ret = cb(inode, cap, arg); + iput(inode); + if (ret < 0) + return ret; + spin_lock(&session->s_cap_lock); + } + spin_unlock(&session->s_cap_lock); + + return 0; +} + +static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, + void *arg) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + dout("removing cap %p, ci is %p, inode is %p\n", + cap, ci, &ci->vfs_inode); + ceph_remove_cap(cap); + return 0; +} + +/* + * caller must hold session s_mutex + */ +static void remove_session_caps(struct ceph_mds_session *session) +{ + dout("remove_session_caps on %p\n", session); + iterate_session_caps(session, remove_session_caps_cb, NULL); + BUG_ON(session->s_nr_caps > 0); + cleanup_cap_releases(session); +} + +/* + * wake up any threads waiting on this session's caps. if the cap is + * old (didn't get renewed on the client reconnect), remove it now. + * + * caller must hold s_mutex. + */ +static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap, + void *arg) +{ + struct ceph_mds_session *session = arg; + + spin_lock(&inode->i_lock); + if (cap->gen != session->s_cap_gen) { + pr_err("failed reconnect %p %llx.%llx cap %p " + "(gen %d < session %d)\n", inode, ceph_vinop(inode), + cap, cap->gen, session->s_cap_gen); + __ceph_remove_cap(cap, NULL); + } + wake_up(&ceph_inode(inode)->i_cap_wq); + spin_unlock(&inode->i_lock); + return 0; +} + +static void wake_up_session_caps(struct ceph_mds_session *session) +{ + dout("wake_up_session_caps %p mds%d\n", session, session->s_mds); + iterate_session_caps(session, wake_up_session_cb, session); +} + +/* + * Send periodic message to MDS renewing all currently held caps. The + * ack will reset the expiration for all caps from this session. + * + * caller holds s_mutex + */ +static int send_renew_caps(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session) +{ + struct ceph_msg *msg; + int state; + + if (time_after_eq(jiffies, session->s_cap_ttl) && + time_after_eq(session->s_cap_ttl, session->s_renew_requested)) + pr_info("mds%d caps stale\n", session->s_mds); + + /* do not try to renew caps until a recovering mds has reconnected + * with its clients. */ + state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds); + if (state < CEPH_MDS_STATE_RECONNECT) { + dout("send_renew_caps ignoring mds%d (%s)\n", + session->s_mds, ceph_mds_state_name(state)); + return 0; + } + + dout("send_renew_caps to mds%d (%s)\n", session->s_mds, + ceph_mds_state_name(state)); + session->s_renew_requested = jiffies; + msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, + ++session->s_renew_seq); + if (IS_ERR(msg)) + return PTR_ERR(msg); + ceph_con_send(&session->s_con, msg); + return 0; +} + +/* + * Note new cap ttl, and any transition from stale -> not stale (fresh?). 
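A subtle point in the renewal handling below: the new cap TTL is measured from the time the renew request was sent (s_renew_requested), not from when the ack arrives, so a slow ack eats into the usable lease. For instance, with a 60 second session timeout:

    /*
     *   renew sent at t=100s  -> s_cap_ttl becomes t=160s
     *   ack arrives at t=103s -> caps are valid for another 57s ("renewed")
     *   ack arrives at t=165s -> s_cap_ttl is already in the past, so the
     *                            caps are reported "still stale"
     */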
+ */ +static void renewed_caps(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session, int is_renew) +{ + int was_stale; + int wake = 0; + + spin_lock(&session->s_cap_lock); + was_stale = is_renew && (session->s_cap_ttl == 0 || + time_after_eq(jiffies, session->s_cap_ttl)); + + session->s_cap_ttl = session->s_renew_requested + + mdsc->mdsmap->m_session_timeout*HZ; + + if (was_stale) { + if (time_before(jiffies, session->s_cap_ttl)) { + pr_info("mds%d caps renewed\n", session->s_mds); + wake = 1; + } else { + pr_info("mds%d caps still stale\n", session->s_mds); + } + } + dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n", + session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh", + time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh"); + spin_unlock(&session->s_cap_lock); + + if (wake) + wake_up_session_caps(session); +} + +/* + * send a session close request + */ +static int request_close_session(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session) +{ + struct ceph_msg *msg; + int err = 0; + + dout("request_close_session mds%d state %s seq %lld\n", + session->s_mds, session_state_name(session->s_state), + session->s_seq); + msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq); + if (IS_ERR(msg)) + err = PTR_ERR(msg); + else + ceph_con_send(&session->s_con, msg); + return err; +} + +/* + * Called with s_mutex held. + */ +static int __close_session(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session) +{ + if (session->s_state >= CEPH_MDS_SESSION_CLOSING) + return 0; + session->s_state = CEPH_MDS_SESSION_CLOSING; + return request_close_session(mdsc, session); +} + +/* + * Trim old(er) caps. + * + * Because we can't cache an inode without one or more caps, we do + * this indirectly: if a cap is unused, we prune its aliases, at which + * point the inode will hopefully get dropped to. + * + * Yes, this is a bit sloppy. Our only real goal here is to respond to + * memory pressure from the MDS, though, so it needn't be perfect. + */ +static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) +{ + struct ceph_mds_session *session = arg; + struct ceph_inode_info *ci = ceph_inode(inode); + int used, oissued, mine; + + if (session->s_trim_caps <= 0) + return -1; + + spin_lock(&inode->i_lock); + mine = cap->issued | cap->implemented; + used = __ceph_caps_used(ci); + oissued = __ceph_caps_issued_other(ci, cap); + + dout("trim_caps_cb %p cap %p mine %s oissued %s used %s\n", + inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued), + ceph_cap_string(used)); + if (ci->i_dirty_caps) + goto out; /* dirty caps */ + if ((used & ~oissued) & mine) + goto out; /* we need these caps */ + + session->s_trim_caps--; + if (oissued) { + /* we aren't the only cap.. just remove us */ + __ceph_remove_cap(cap, NULL); + } else { + /* try to drop referring dentries */ + spin_unlock(&inode->i_lock); + d_prune_aliases(inode); + dout("trim_caps_cb %p cap %p pruned, count now %d\n", + inode, cap, atomic_read(&inode->i_count)); + return 0; + } + +out: + spin_unlock(&inode->i_lock); + return 0; +} + +/* + * Trim session cap count down to some max number. 
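The trimming below is budget driven: s_trim_caps is set to however many caps we are over the target, and trim_caps_cb() above decrements it as caps are dropped, leaving inodes with dirty caps or actively used caps alone. Roughly:

    /*
     *   s_nr_caps = 10000, max_caps = 8000  ->  s_trim_caps = 2000
     *
     * then, for each cap while the budget lasts:
     *   - inode has dirty caps, or we use bits no other mds has issued
     *         -> keep it
     *   - some other mds also issues caps on this inode
     *         -> just drop our cap
     *   - ours is the only cap
     *         -> d_prune_aliases() and hope the inode (and cap) go away
     */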
+ */ +static int trim_caps(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session, + int max_caps) +{ + int trim_caps = session->s_nr_caps - max_caps; + + dout("trim_caps mds%d start: %d / %d, trim %d\n", + session->s_mds, session->s_nr_caps, max_caps, trim_caps); + if (trim_caps > 0) { + session->s_trim_caps = trim_caps; + iterate_session_caps(session, trim_caps_cb, session); + dout("trim_caps mds%d done: %d / %d, trimmed %d\n", + session->s_mds, session->s_nr_caps, max_caps, + trim_caps - session->s_trim_caps); + } + return 0; +} + +/* + * Allocate cap_release messages. If there is a partially full message + * in the queue, try to allocate enough to cover it's remainder, so that + * we can send it immediately. + * + * Called under s_mutex. + */ +static int add_cap_releases(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session, + int extra) +{ + struct ceph_msg *msg; + struct ceph_mds_cap_release *head; + int err = -ENOMEM; + + if (extra < 0) + extra = mdsc->client->mount_args.cap_release_safety; + + spin_lock(&session->s_cap_lock); + + if (!list_empty(&session->s_cap_releases)) { + msg = list_first_entry(&session->s_cap_releases, + struct ceph_msg, + list_head); + head = msg->front.iov_base; + extra += CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num); + } + + while (session->s_num_cap_releases < session->s_nr_caps + extra) { + spin_unlock(&session->s_cap_lock); + msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE, + 0, 0, NULL); + if (!msg) + goto out_unlocked; + dout("add_cap_releases %p msg %p now %d\n", session, msg, + (int)msg->front.iov_len); + head = msg->front.iov_base; + head->num = cpu_to_le32(0); + msg->front.iov_len = sizeof(*head); + spin_lock(&session->s_cap_lock); + list_add(&msg->list_head, &session->s_cap_releases); + session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE; + } + + if (!list_empty(&session->s_cap_releases)) { + msg = list_first_entry(&session->s_cap_releases, + struct ceph_msg, + list_head); + head = msg->front.iov_base; + if (head->num) { + dout(" queueing non-full %p (%d)\n", msg, + le32_to_cpu(head->num)); + list_move_tail(&msg->list_head, + &session->s_cap_releases_done); + session->s_num_cap_releases -= + CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num); + } + } + err = 0; + spin_unlock(&session->s_cap_lock); +out_unlocked: + return err; +} + +/* + * flush all dirty inode data to disk. 
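+ * This is the condition ceph_mdsc_sync() waits on via cap_flushing_wq.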
+ * + * returns true if we've flushed through want_flush_seq + */ +static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq) +{ + int mds, ret = 1; + + dout("check_cap_flush want %lld\n", want_flush_seq); + mutex_lock(&mdsc->mutex); + for (mds = 0; ret && mds < mdsc->max_sessions; mds++) { + struct ceph_mds_session *session = mdsc->sessions[mds]; + + if (!session) + continue; + get_session(session); + mutex_unlock(&mdsc->mutex); + + mutex_lock(&session->s_mutex); + if (!list_empty(&session->s_cap_flushing)) { + struct ceph_inode_info *ci = + list_entry(session->s_cap_flushing.next, + struct ceph_inode_info, + i_flushing_item); + struct inode *inode = &ci->vfs_inode; + + spin_lock(&inode->i_lock); + if (ci->i_cap_flush_seq <= want_flush_seq) { + dout("check_cap_flush still flushing %p " + "seq %lld <= %lld to mds%d\n", inode, + ci->i_cap_flush_seq, want_flush_seq, + session->s_mds); + ret = 0; + } + spin_unlock(&inode->i_lock); + } + mutex_unlock(&session->s_mutex); + ceph_put_mds_session(session); + + if (!ret) + return ret; + mutex_lock(&mdsc->mutex); + } + + mutex_unlock(&mdsc->mutex); + dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq); + return ret; +} + +/* + * called under s_mutex + */ +static void send_cap_releases(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session) +{ + struct ceph_msg *msg; + + dout("send_cap_releases mds%d\n", session->s_mds); + while (1) { + spin_lock(&session->s_cap_lock); + if (list_empty(&session->s_cap_releases_done)) + break; + msg = list_first_entry(&session->s_cap_releases_done, + struct ceph_msg, list_head); + list_del_init(&msg->list_head); + spin_unlock(&session->s_cap_lock); + msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); + dout("send_cap_releases mds%d %p\n", session->s_mds, msg); + ceph_con_send(&session->s_con, msg); + } + spin_unlock(&session->s_cap_lock); +} + +/* + * requests + */ + +/* + * Create an mds request. + */ +struct ceph_mds_request * +ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) +{ + struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); + + if (!req) + return ERR_PTR(-ENOMEM); + + req->r_started = jiffies; + req->r_resend_mds = -1; + INIT_LIST_HEAD(&req->r_unsafe_dir_item); + req->r_fmode = -1; + atomic_set(&req->r_ref, 1); /* one for request_tree, one for caller */ + INIT_LIST_HEAD(&req->r_wait); + init_completion(&req->r_completion); + init_completion(&req->r_safe_completion); + INIT_LIST_HEAD(&req->r_unsafe_item); + + req->r_op = op; + req->r_direct_mode = mode; + return req; +} + +/* + * return oldest (lowest) tid in request tree, 0 if none. + * + * called under mdsc->mutex. + */ +static u64 __get_oldest_tid(struct ceph_mds_client *mdsc) +{ + struct ceph_mds_request *first; + if (radix_tree_gang_lookup(&mdsc->request_tree, + (void **)&first, 0, 1) <= 0) + return 0; + return first->r_tid; +} + +/* + * Build a dentry's path. Allocate on heap; caller must kfree. Based + * on build_path_from_dentry in fs/cifs/dir.c. + * + * If @stop_on_nosnap, generate path relative to the first non-snapped + * inode. + * + * Encode hidden .snap dirs as a double /, i.e. 
+ * foo/.snap/bar -> foo//bar + */ +char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, + int stop_on_nosnap) +{ + struct dentry *temp; + char *path; + int len, pos; + + if (dentry == NULL) + return ERR_PTR(-EINVAL); + +retry: + len = 0; + for (temp = dentry; !IS_ROOT(temp);) { + struct inode *inode = temp->d_inode; + if (inode && ceph_snap(inode) == CEPH_SNAPDIR) + len++; /* slash only */ + else if (stop_on_nosnap && inode && + ceph_snap(inode) == CEPH_NOSNAP) + break; + else + len += 1 + temp->d_name.len; + temp = temp->d_parent; + if (temp == NULL) { + pr_err("build_path_dentry corrupt dentry %p\n", dentry); + return ERR_PTR(-EINVAL); + } + } + if (len) + len--; /* no leading '/' */ + + path = kmalloc(len+1, GFP_NOFS); + if (path == NULL) + return ERR_PTR(-ENOMEM); + pos = len; + path[pos] = 0; /* trailing null */ + for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) { + struct inode *inode = temp->d_inode; + + if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { + dout("build_path_dentry path+%d: %p SNAPDIR\n", + pos, temp); + } else if (stop_on_nosnap && inode && + ceph_snap(inode) == CEPH_NOSNAP) { + break; + } else { + pos -= temp->d_name.len; + if (pos < 0) + break; + strncpy(path + pos, temp->d_name.name, + temp->d_name.len); + dout("build_path_dentry path+%d: %p '%.*s'\n", + pos, temp, temp->d_name.len, path + pos); + } + if (pos) + path[--pos] = '/'; + temp = temp->d_parent; + if (temp == NULL) { + pr_err("build_path_dentry corrupt dentry\n"); + kfree(path); + return ERR_PTR(-EINVAL); + } + } + if (pos != 0) { + pr_err("build_path_dentry did not end path lookup where " + "expected, namelen is %d, pos is %d\n", len, pos); + /* presumably this is only possible if racing with a + rename of one of the parent directories (we can not + lock the dentries above us to prevent this, but + retrying should be harmless) */ + kfree(path); + goto retry; + } + + *base = ceph_ino(temp->d_inode); + *plen = len; + dout("build_path_dentry on %p %d built %llx '%.*s'\n", + dentry, atomic_read(&dentry->d_count), *base, len, path); + return path; +} + +static int build_dentry_path(struct dentry *dentry, + const char **ppath, int *ppathlen, u64 *pino, + int *pfreepath) +{ + char *path; + + if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) { + *pino = ceph_ino(dentry->d_parent->d_inode); + *ppath = dentry->d_name.name; + *ppathlen = dentry->d_name.len; + return 0; + } + path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); + if (IS_ERR(path)) + return PTR_ERR(path); + *ppath = path; + *pfreepath = 1; + return 0; +} + +static int build_inode_path(struct inode *inode, + const char **ppath, int *ppathlen, u64 *pino, + int *pfreepath) +{ + struct dentry *dentry; + char *path; + + if (ceph_snap(inode) == CEPH_NOSNAP) { + *pino = ceph_ino(inode); + *ppathlen = 0; + return 0; + } + dentry = d_find_alias(inode); + path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); + dput(dentry); + if (IS_ERR(path)) + return PTR_ERR(path); + *ppath = path; + *pfreepath = 1; + return 0; +} + +/* + * request arguments may be specified via an inode *, a dentry *, or + * an explicit ino+path. 
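+ * An inode argument takes precedence over a dentry, which takes
+ * precedence over the explicit ino+path; the result is always an ino
+ * plus a path (possibly empty) relative to it.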
+ */ +static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, + const char *rpath, u64 rino, + const char **ppath, int *pathlen, + u64 *ino, int *freepath) +{ + int r = 0; + + if (rinode) { + r = build_inode_path(rinode, ppath, pathlen, ino, freepath); + dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode), + ceph_snap(rinode)); + } else if (rdentry) { + r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath); + dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, + *ppath); + } else if (rpath) { + *ino = rino; + *ppath = rpath; + *pathlen = strlen(rpath); + dout(" path %.*s\n", *pathlen, rpath); + } + + return r; +} + +/* + * called under mdsc->mutex + */ +static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req, + int mds) +{ + struct ceph_msg *msg; + struct ceph_mds_request_head *head; + const char *path1 = NULL; + const char *path2 = NULL; + u64 ino1 = 0, ino2 = 0; + int pathlen1 = 0, pathlen2 = 0; + int freepath1 = 0, freepath2 = 0; + int len; + u16 releases; + void *p, *end; + int ret; + + ret = set_request_path_attr(req->r_inode, req->r_dentry, + req->r_path1, req->r_ino1.ino, + &path1, &pathlen1, &ino1, &freepath1); + if (ret < 0) { + msg = ERR_PTR(ret); + goto out; + } + + ret = set_request_path_attr(NULL, req->r_old_dentry, + req->r_path2, req->r_ino2.ino, + &path2, &pathlen2, &ino2, &freepath2); + if (ret < 0) { + msg = ERR_PTR(ret); + goto out_free1; + } + + len = sizeof(*head) + + pathlen1 + pathlen2 + 2*(sizeof(u32) + sizeof(u64)); + + /* calculate (max) length for cap releases */ + len += sizeof(struct ceph_mds_request_release) * + (!!req->r_inode_drop + !!req->r_dentry_drop + + !!req->r_old_inode_drop + !!req->r_old_dentry_drop); + if (req->r_dentry_drop) + len += req->r_dentry->d_name.len; + if (req->r_old_dentry_drop) + len += req->r_old_dentry->d_name.len; + + msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, 0, 0, NULL); + if (IS_ERR(msg)) + goto out_free2; + + head = msg->front.iov_base; + p = msg->front.iov_base + sizeof(*head); + end = msg->front.iov_base + msg->front.iov_len; + + head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); + head->op = cpu_to_le32(req->r_op); + head->caller_uid = cpu_to_le32(current_fsuid()); + head->caller_gid = cpu_to_le32(current_fsgid()); + head->args = req->r_args; + + ceph_encode_filepath(&p, end, ino1, path1); + ceph_encode_filepath(&p, end, ino2, path2); + + /* cap releases */ + releases = 0; + if (req->r_inode_drop) + releases += ceph_encode_inode_release(&p, + req->r_inode ? 
req->r_inode : req->r_dentry->d_inode, + mds, req->r_inode_drop, req->r_inode_unless, 0); + if (req->r_dentry_drop) + releases += ceph_encode_dentry_release(&p, req->r_dentry, + mds, req->r_dentry_drop, req->r_dentry_unless); + if (req->r_old_dentry_drop) + releases += ceph_encode_dentry_release(&p, req->r_old_dentry, + mds, req->r_old_dentry_drop, req->r_old_dentry_unless); + if (req->r_old_inode_drop) + releases += ceph_encode_inode_release(&p, + req->r_old_dentry->d_inode, + mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); + head->num_releases = cpu_to_le16(releases); + + BUG_ON(p > end); + msg->front.iov_len = p - msg->front.iov_base; + msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); + + msg->pages = req->r_pages; + msg->nr_pages = req->r_num_pages; + msg->hdr.data_len = cpu_to_le32(req->r_data_len); + msg->hdr.data_off = cpu_to_le16(0); + +out_free2: + if (freepath2) + kfree((char *)path2); +out_free1: + if (freepath1) + kfree((char *)path1); +out: + return msg; +} + +/* + * called under mdsc->mutex if error, under no mutex if + * success. + */ +static void complete_request(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req) +{ + if (req->r_callback) + req->r_callback(mdsc, req); + else + complete(&req->r_completion); +} + +/* + * called under mdsc->mutex + */ +static int __prepare_send_request(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req, + int mds) +{ + struct ceph_mds_request_head *rhead; + struct ceph_msg *msg; + int flags = 0; + + req->r_mds = mds; + req->r_attempts++; + dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, + req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); + + if (req->r_request) { + ceph_msg_put(req->r_request); + req->r_request = NULL; + } + msg = create_request_message(mdsc, req, mds); + if (IS_ERR(msg)) { + req->r_reply = ERR_PTR(PTR_ERR(msg)); + complete_request(mdsc, req); + return -PTR_ERR(msg); + } + req->r_request = msg; + + rhead = msg->front.iov_base; + rhead->tid = cpu_to_le64(req->r_tid); + rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); + if (req->r_got_unsafe) + flags |= CEPH_MDS_FLAG_REPLAY; + if (req->r_locked_dir) + flags |= CEPH_MDS_FLAG_WANT_DENTRY; + rhead->flags = cpu_to_le32(flags); + rhead->num_fwd = req->r_num_fwd; + rhead->num_retry = req->r_attempts - 1; + + dout(" r_locked_dir = %p\n", req->r_locked_dir); + + if (req->r_target_inode && req->r_got_unsafe) + rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); + else + rhead->ino = 0; + return 0; +} + +/* + * send request, or put it on the appropriate wait list. 
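+ * If no mds is available (or the mdsmap says it is not yet active) the
+ * request waits on mdsc->waiting_for_map; if the session is still being
+ * opened it waits on the session's s_waiting list instead.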
+ */ +static int __do_request(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req) +{ + struct ceph_mds_session *session = NULL; + int mds = -1; + int err = -EAGAIN; + + if (req->r_reply) + goto out; + + if (req->r_timeout && + time_after_eq(jiffies, req->r_started + req->r_timeout)) { + dout("do_request timed out\n"); + err = -EIO; + goto finish; + } + + mds = __choose_mds(mdsc, req); + if (mds < 0 || + ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { + dout("do_request no mds or not active, waiting for map\n"); + list_add(&req->r_wait, &mdsc->waiting_for_map); + goto out; + } + + /* get, open session */ + session = __ceph_lookup_mds_session(mdsc, mds); + if (!session) + session = register_session(mdsc, mds); + dout("do_request mds%d session %p state %s\n", mds, session, + session_state_name(session->s_state)); + if (session->s_state != CEPH_MDS_SESSION_OPEN && + session->s_state != CEPH_MDS_SESSION_HUNG) { + if (session->s_state == CEPH_MDS_SESSION_NEW || + session->s_state == CEPH_MDS_SESSION_CLOSING) + __open_session(mdsc, session); + list_add(&req->r_wait, &session->s_waiting); + goto out_session; + } + + /* send request */ + req->r_session = get_session(session); + req->r_resend_mds = -1; /* forget any previous mds hint */ + + if (req->r_request_started == 0) /* note request start time */ + req->r_request_started = jiffies; + + err = __prepare_send_request(mdsc, req, mds); + if (!err) { + ceph_msg_get(req->r_request); + ceph_con_send(&session->s_con, req->r_request); + } + +out_session: + ceph_put_mds_session(session); +out: + return err; + +finish: + req->r_reply = ERR_PTR(err); + complete_request(mdsc, req); + goto out; +} + +/* + * called under mdsc->mutex + */ +static void __wake_requests(struct ceph_mds_client *mdsc, + struct list_head *head) +{ + struct ceph_mds_request *req, *nreq; + + list_for_each_entry_safe(req, nreq, head, r_wait) { + list_del_init(&req->r_wait); + __do_request(mdsc, req); + } +} + +/* + * Wake up threads with requests pending for @mds, so that they can + * resubmit their requests to a possibly different mds. If @all is set, + * wake up if their requests has been forwarded to @mds, too. + */ +static void kick_requests(struct ceph_mds_client *mdsc, int mds, int all) +{ + struct ceph_mds_request *reqs[10]; + u64 nexttid = 0; + int i, got; + + dout("kick_requests mds%d\n", mds); + while (nexttid <= mdsc->last_tid) { + got = radix_tree_gang_lookup(&mdsc->request_tree, + (void **)&reqs, nexttid, 10); + if (got == 0) + break; + nexttid = reqs[got-1]->r_tid + 1; + for (i = 0; i < got; i++) { + if (reqs[i]->r_got_unsafe) + continue; + if (reqs[i]->r_session && + reqs[i]->r_session->s_mds == mds) { + dout(" kicking tid %llu\n", reqs[i]->r_tid); + put_request_session(reqs[i]); + __do_request(mdsc, reqs[i]); + } + } + } +} + +void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req) +{ + dout("submit_request on %p\n", req); + mutex_lock(&mdsc->mutex); + __register_request(mdsc, req, NULL); + __do_request(mdsc, req); + mutex_unlock(&mdsc->mutex); +} + +/* + * Synchrously perform an mds request. Take care of all of the + * session setup, forwarding, retry details. 
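+ * The caller blocks on r_completion (bounded by r_timeout, if set)
+ * until the reply arrives or the request times out.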
+ */ +int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, + struct inode *dir, + struct ceph_mds_request *req) +{ + int err; + + dout("do_request on %p\n", req); + + /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */ + if (req->r_inode) + ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); + if (req->r_locked_dir) + ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN); + if (req->r_old_dentry) + ceph_get_cap_refs( + ceph_inode(req->r_old_dentry->d_parent->d_inode), + CEPH_CAP_PIN); + + /* issue */ + mutex_lock(&mdsc->mutex); + __register_request(mdsc, req, dir); + __do_request(mdsc, req); + + /* wait */ + if (!req->r_reply) { + mutex_unlock(&mdsc->mutex); + if (req->r_timeout) { + err = wait_for_completion_timeout(&req->r_completion, + req->r_timeout); + if (err > 0) + err = 0; + else if (err == 0) + req->r_reply = ERR_PTR(-EIO); + } else { + wait_for_completion(&req->r_completion); + } + mutex_lock(&mdsc->mutex); + } + + if (IS_ERR(req->r_reply)) { + err = PTR_ERR(req->r_reply); + req->r_reply = NULL; + + /* clean up */ + __unregister_request(mdsc, req); + if (!list_empty(&req->r_unsafe_item)) + list_del_init(&req->r_unsafe_item); + complete(&req->r_safe_completion); + } else if (req->r_err) { + err = req->r_err; + } else { + err = le32_to_cpu(req->r_reply_info.head->result); + } + mutex_unlock(&mdsc->mutex); + + dout("do_request %p done, result %d\n", req, err); + return err; +} + +/* + * Handle mds reply. + * + * We take the session mutex and parse and process the reply immediately. + * This preserves the logical ordering of replies, capabilities, etc., sent + * by the MDS as they are applied to our local cache. + */ +static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) +{ + struct ceph_mds_client *mdsc = session->s_mdsc; + struct ceph_mds_request *req; + struct ceph_mds_reply_head *head = msg->front.iov_base; + struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ + u64 tid; + int err, result; + int mds; + + if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS) + return; + if (msg->front.iov_len < sizeof(*head)) { + pr_err("mdsc_handle_reply got corrupt (short) reply\n"); + return; + } + + /* get request, session */ + tid = le64_to_cpu(head->tid); + mutex_lock(&mdsc->mutex); + req = __lookup_request(mdsc, tid); + if (!req) { + dout("handle_reply on unknown tid %llu\n", tid); + mutex_unlock(&mdsc->mutex); + return; + } + dout("handle_reply %p\n", req); + mds = le64_to_cpu(msg->hdr.src.name.num); + + /* correct session? */ + if (!req->r_session && req->r_session != session) { + pr_err("mdsc_handle_reply got %llu on session mds%d" + " not mds%d\n", tid, session->s_mds, + req->r_session ? req->r_session->s_mds : -1); + mutex_unlock(&mdsc->mutex); + goto out; + } + + /* dup? */ + if ((req->r_got_unsafe && !head->safe) || + (req->r_got_safe && head->safe)) { + pr_warning("got a dup %s reply on %llu from mds%d\n", + head->safe ? "safe" : "unsafe", tid, mds); + mutex_unlock(&mdsc->mutex); + goto out; + } + + result = le32_to_cpu(head->result); + + /* + * Tolerate 2 consecutive ESTALEs from the same mds. + * FIXME: we should be looking at the cap migrate_seq. 
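+ * (The retry is redirected at the auth mds; after more than two stale
+ * replies in a row the error is passed back to the caller.)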
+ */ + if (result == -ESTALE) { + req->r_direct_mode = USE_AUTH_MDS; + req->r_num_stale++; + if (req->r_num_stale <= 2) { + __do_request(mdsc, req); + mutex_unlock(&mdsc->mutex); + goto out; + } + } else { + req->r_num_stale = 0; + } + + if (head->safe) { + req->r_got_safe = true; + __unregister_request(mdsc, req); + complete(&req->r_safe_completion); + + if (req->r_got_unsafe) { + /* + * We already handled the unsafe response, now do the + * cleanup. No need to examine the response; the MDS + * doesn't include any result info in the safe + * response. And even if it did, there is nothing + * useful we could do with a revised return value. + */ + dout("got safe reply %llu, mds%d\n", tid, mds); + list_del_init(&req->r_unsafe_item); + + /* last unsafe request during umount? */ + if (mdsc->stopping && !__get_oldest_tid(mdsc)) + complete(&mdsc->safe_umount_waiters); + mutex_unlock(&mdsc->mutex); + goto out; + } + } + + BUG_ON(req->r_reply); + + if (!head->safe) { + req->r_got_unsafe = true; + list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); + } + + dout("handle_reply tid %lld result %d\n", tid, result); + rinfo = &req->r_reply_info; + err = parse_reply_info(msg, rinfo); + mutex_unlock(&mdsc->mutex); + + mutex_lock(&session->s_mutex); + if (err < 0) { + pr_err("mdsc_handle_reply got corrupt reply mds%d\n", mds); + goto out_err; + } + + /* snap trace */ + if (rinfo->snapblob_len) { + down_write(&mdsc->snap_rwsem); + ceph_update_snap_trace(mdsc, rinfo->snapblob, + rinfo->snapblob + rinfo->snapblob_len, + le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP); + downgrade_write(&mdsc->snap_rwsem); + } else { + down_read(&mdsc->snap_rwsem); + } + + /* insert trace into our cache */ + err = ceph_fill_trace(mdsc->client->sb, req, req->r_session); + if (err == 0) { + if (result == 0 && rinfo->dir_nr) + ceph_readdir_prepopulate(req, req->r_session); + ceph_unreserve_caps(&req->r_caps_reservation); + } + + up_read(&mdsc->snap_rwsem); +out_err: + if (err) { + req->r_err = err; + } else { + req->r_reply = msg; + ceph_msg_get(msg); + } + + add_cap_releases(mdsc, req->r_session, -1); + mutex_unlock(&session->s_mutex); + + /* kick calling process */ + complete_request(mdsc, req); +out: + ceph_mdsc_put_request(req); + return; +} + + + +/* + * handle mds notification that our request has been forwarded. + */ +static void handle_forward(struct ceph_mds_client *mdsc, struct ceph_msg *msg) +{ + struct ceph_mds_request *req; + u64 tid; + u32 next_mds; + u32 fwd_seq; + u8 must_resend; + int err = -EINVAL; + void *p = msg->front.iov_base; + void *end = p + msg->front.iov_len; + int from_mds, state; + + if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS) + goto bad; + from_mds = le64_to_cpu(msg->hdr.src.name.num); + + ceph_decode_need(&p, end, sizeof(u64)+2*sizeof(u32), bad); + ceph_decode_64(&p, tid); + ceph_decode_32(&p, next_mds); + ceph_decode_32(&p, fwd_seq); + ceph_decode_8(&p, must_resend); + + WARN_ON(must_resend); /* shouldn't happen. */ + + mutex_lock(&mdsc->mutex); + req = __lookup_request(mdsc, tid); + if (!req) { + dout("forward %llu dne\n", tid); + goto out; /* dup reply? */ + } + + state = mdsc->sessions[next_mds]->s_state; + if (fwd_seq <= req->r_num_fwd) { + dout("forward %llu to mds%d - old seq %d <= %d\n", + tid, next_mds, req->r_num_fwd, fwd_seq); + } else { + /* resend. 
forward race not possible; mds would drop */ + dout("forward %llu to mds%d (we resend)\n", tid, next_mds); + req->r_num_fwd = fwd_seq; + req->r_resend_mds = next_mds; + put_request_session(req); + __do_request(mdsc, req); + } + ceph_mdsc_put_request(req); +out: + mutex_unlock(&mdsc->mutex); + return; + +bad: + pr_err("mdsc_handle_forward decode error err=%d\n", err); +} + +/* + * handle a mds session control message + */ +static void handle_session(struct ceph_mds_session *session, + struct ceph_msg *msg) +{ + struct ceph_mds_client *mdsc = session->s_mdsc; + u32 op; + u64 seq; + int mds; + struct ceph_mds_session_head *h = msg->front.iov_base; + int wake = 0; + + if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS) + return; + mds = le64_to_cpu(msg->hdr.src.name.num); + + /* decode */ + if (msg->front.iov_len != sizeof(*h)) + goto bad; + op = le32_to_cpu(h->op); + seq = le64_to_cpu(h->seq); + + mutex_lock(&mdsc->mutex); + /* FIXME: this ttl calculation is generous */ + session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; + mutex_unlock(&mdsc->mutex); + + mutex_lock(&session->s_mutex); + + dout("handle_session mds%d %s %p state %s seq %llu\n", + mds, ceph_session_op_name(op), session, + session_state_name(session->s_state), seq); + + if (session->s_state == CEPH_MDS_SESSION_HUNG) { + session->s_state = CEPH_MDS_SESSION_OPEN; + pr_info("mds%d came back\n", session->s_mds); + } + + switch (op) { + case CEPH_SESSION_OPEN: + session->s_state = CEPH_MDS_SESSION_OPEN; + renewed_caps(mdsc, session, 0); + wake = 1; + if (mdsc->stopping) + __close_session(mdsc, session); + break; + + case CEPH_SESSION_RENEWCAPS: + if (session->s_renew_seq == seq) + renewed_caps(mdsc, session, 1); + break; + + case CEPH_SESSION_CLOSE: + unregister_session(mdsc, mds); + remove_session_caps(session); + wake = 1; /* for good measure */ + complete(&mdsc->session_close_waiters); + kick_requests(mdsc, mds, 0); /* cur only */ + break; + + case CEPH_SESSION_STALE: + pr_info("mds%d caps went stale, renewing\n", + session->s_mds); + spin_lock(&session->s_cap_lock); + session->s_cap_gen++; + session->s_cap_ttl = 0; + spin_unlock(&session->s_cap_lock); + send_renew_caps(mdsc, session); + break; + + case CEPH_SESSION_RECALL_STATE: + trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); + break; + + default: + pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds); + WARN_ON(1); + } + + mutex_unlock(&session->s_mutex); + if (wake) { + mutex_lock(&mdsc->mutex); + __wake_requests(mdsc, &session->s_waiting); + mutex_unlock(&mdsc->mutex); + } + return; + +bad: + pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds, + (int)msg->front.iov_len); + return; +} + + +/* + * called under session->mutex. + */ +static void replay_unsafe_requests(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session) +{ + struct ceph_mds_request *req, *nreq; + int err; + + dout("replay_unsafe_requests mds%d\n", session->s_mds); + + mutex_lock(&mdsc->mutex); + list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { + err = __prepare_send_request(mdsc, req, session->s_mds); + if (!err) { + ceph_msg_get(req->r_request); + ceph_con_send(&session->s_con, req->r_request); + } + } + mutex_unlock(&mdsc->mutex); +} + +/* + * Encode information about a cap for a reconnect with the MDS. 
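+ * For each cap we send the ino, the dentry path back toward the root
+ * (so the mds can rebuild its cache), and our view of the cap state:
+ * cap_id, wanted and issued bits, size, mtime/atime, and the
+ * containing snap realm.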
+ */ +struct encode_caps_data { + void **pp; + void *end; + int *num_caps; +}; + +static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, + void *arg) +{ + struct ceph_mds_cap_reconnect *rec; + struct ceph_inode_info *ci; + struct encode_caps_data *data = (struct encode_caps_data *)arg; + void *p = *(data->pp); + void *end = data->end; + char *path; + int pathlen, err; + u64 pathbase; + struct dentry *dentry; + + ci = cap->ci; + + dout(" adding %p ino %llx.%llx cap %p %lld %s\n", + inode, ceph_vinop(inode), cap, cap->cap_id, + ceph_cap_string(cap->issued)); + ceph_decode_need(&p, end, sizeof(u64), needmore); + ceph_encode_64(&p, ceph_ino(inode)); + + dentry = d_find_alias(inode); + if (dentry) { + path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0); + if (IS_ERR(path)) { + err = PTR_ERR(path); + BUG_ON(err); + } + } else { + path = NULL; + pathlen = 0; + } + ceph_decode_need(&p, end, pathlen+4, needmore); + ceph_encode_string(&p, end, path, pathlen); + + ceph_decode_need(&p, end, sizeof(*rec), needmore); + rec = p; + p += sizeof(*rec); + BUG_ON(p > end); + spin_lock(&inode->i_lock); + cap->seq = 0; /* reset cap seq */ + cap->issue_seq = 0; /* and issue_seq */ + rec->cap_id = cpu_to_le64(cap->cap_id); + rec->pathbase = cpu_to_le64(pathbase); + rec->wanted = cpu_to_le32(__ceph_caps_wanted(ci)); + rec->issued = cpu_to_le32(cap->issued); + rec->size = cpu_to_le64(inode->i_size); + ceph_encode_timespec(&rec->mtime, &inode->i_mtime); + ceph_encode_timespec(&rec->atime, &inode->i_atime); + rec->snaprealm = cpu_to_le64(ci->i_snap_realm->ino); + spin_unlock(&inode->i_lock); + + kfree(path); + dput(dentry); + (*data->num_caps)++; + *(data->pp) = p; + return 0; +needmore: + return -ENOSPC; +} + + +/* + * If an MDS fails and recovers, clients need to reconnect in order to + * reestablish shared state. This includes all caps issued through + * this session _and_ the snap_realm hierarchy. Because it's not + * clear which snap realms the mds cares about, we send everything we + * know about.. that ensures we'll then get any new info the + * recovering MDS might have. + * + * This is a relatively heavyweight operation, but it's rare. + * + * called with mdsc->mutex held. 
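+ * (the mutex is dropped for the duration of the reconnect and
+ * re-taken before returning)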
+ */ +static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds) +{ + struct ceph_mds_session *session; + struct ceph_msg *reply; + int newlen, len = 4 + 1; + void *p, *end; + int err; + int num_caps, num_realms = 0; + int got; + u64 next_snap_ino = 0; + __le32 *pnum_caps, *pnum_realms; + struct encode_caps_data iter_args; + + pr_info("reconnect to recovering mds%d\n", mds); + + /* find session */ + session = __ceph_lookup_mds_session(mdsc, mds); + mutex_unlock(&mdsc->mutex); /* drop lock for duration */ + + if (session) { + mutex_lock(&session->s_mutex); + + session->s_state = CEPH_MDS_SESSION_RECONNECTING; + session->s_seq = 0; + + ceph_con_open(&session->s_con, + ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); + + /* replay unsafe requests */ + replay_unsafe_requests(mdsc, session); + + /* estimate needed space */ + len += session->s_nr_caps * + (100+sizeof(struct ceph_mds_cap_reconnect)); + pr_info("estimating i need %d bytes for %d caps\n", + len, session->s_nr_caps); + } else { + dout("no session for mds%d, will send short reconnect\n", + mds); + } + + down_read(&mdsc->snap_rwsem); + +retry: + /* build reply */ + reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, len, 0, 0, NULL); + if (IS_ERR(reply)) { + err = PTR_ERR(reply); + pr_err("send_mds_reconnect ENOMEM on %d for mds%d\n", + len, mds); + goto out; + } + p = reply->front.iov_base; + end = p + len; + + if (!session) { + ceph_encode_8(&p, 1); /* session was closed */ + ceph_encode_32(&p, 0); + goto send; + } + dout("session %p state %s\n", session, + session_state_name(session->s_state)); + + /* traverse this session's caps */ + ceph_encode_8(&p, 0); + pnum_caps = p; + ceph_encode_32(&p, session->s_nr_caps); + num_caps = 0; + + iter_args.pp = &p; + iter_args.end = end; + iter_args.num_caps = &num_caps; + err = iterate_session_caps(session, encode_caps_cb, &iter_args); + if (err == -ENOSPC) + goto needmore; + if (err < 0) + goto out; + *pnum_caps = cpu_to_le32(num_caps); + + /* + * snaprealms. we provide mds with the ino, seq (version), and + * parent for all of our realms. If the mds has any newer info, + * it will tell us. + */ + next_snap_ino = 0; + /* save some space for the snaprealm count */ + pnum_realms = p; + ceph_decode_need(&p, end, sizeof(*pnum_realms), needmore); + p += sizeof(*pnum_realms); + num_realms = 0; + while (1) { + struct ceph_snap_realm *realm; + struct ceph_mds_snaprealm_reconnect *sr_rec; + got = radix_tree_gang_lookup(&mdsc->snap_realms, + (void **)&realm, next_snap_ino, 1); + if (!got) + break; + + dout(" adding snap realm %llx seq %lld parent %llx\n", + realm->ino, realm->seq, realm->parent_ino); + ceph_decode_need(&p, end, sizeof(*sr_rec), needmore); + sr_rec = p; + sr_rec->ino = cpu_to_le64(realm->ino); + sr_rec->seq = cpu_to_le64(realm->seq); + sr_rec->parent = cpu_to_le64(realm->parent_ino); + p += sizeof(*sr_rec); + num_realms++; + next_snap_ino = realm->ino + 1; + } + *pnum_realms = cpu_to_le32(num_realms); + +send: + reply->front.iov_len = p - reply->front.iov_base; + reply->hdr.front_len = cpu_to_le32(reply->front.iov_len); + dout("final len was %u (guessed %d)\n", + (unsigned)reply->front.iov_len, len); + ceph_con_send(&session->s_con, reply); + + if (session) { + session->s_state = CEPH_MDS_SESSION_OPEN; + __wake_requests(mdsc, &session->s_waiting); + } + +out: + up_read(&mdsc->snap_rwsem); + if (session) { + mutex_unlock(&session->s_mutex); + ceph_put_mds_session(session); + } + mutex_lock(&mdsc->mutex); + return; + +needmore: + /* + * we need a larger buffer. 
this doesn't very accurately + * factor in snap realms, but it's safe. + */ + num_caps += num_realms; + newlen = len * ((100 * (session->s_nr_caps+3)) / (num_caps + 1)) / 100; + pr_info("i guessed %d, and did %d of %d caps, retrying with %d\n", + len, num_caps, session->s_nr_caps, newlen); + len = newlen; + ceph_msg_put(reply); + goto retry; +} + + +/* + * compare old and new mdsmaps, kicking requests + * and closing out old connections as necessary + * + * called under mdsc->mutex. + */ +static void check_new_map(struct ceph_mds_client *mdsc, + struct ceph_mdsmap *newmap, + struct ceph_mdsmap *oldmap) +{ + int i; + int oldstate, newstate; + struct ceph_mds_session *s; + + dout("check_new_map new %u old %u\n", + newmap->m_epoch, oldmap->m_epoch); + + for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) { + if (mdsc->sessions[i] == NULL) + continue; + s = mdsc->sessions[i]; + oldstate = ceph_mdsmap_get_state(oldmap, i); + newstate = ceph_mdsmap_get_state(newmap, i); + + dout("check_new_map mds%d state %s -> %s (session %s)\n", + i, ceph_mds_state_name(oldstate), + ceph_mds_state_name(newstate), + session_state_name(s->s_state)); + + if (memcmp(ceph_mdsmap_get_addr(oldmap, i), + ceph_mdsmap_get_addr(newmap, i), + sizeof(struct ceph_entity_addr))) { + if (s->s_state == CEPH_MDS_SESSION_OPENING) { + /* the session never opened, just close it + * out now */ + __wake_requests(mdsc, &s->s_waiting); + unregister_session(mdsc, i); + } else { + /* just close it */ + mutex_unlock(&mdsc->mutex); + mutex_lock(&s->s_mutex); + mutex_lock(&mdsc->mutex); + ceph_con_close(&s->s_con); + mutex_unlock(&s->s_mutex); + s->s_state = CEPH_MDS_SESSION_RESTARTING; + } + + /* kick any requests waiting on the recovering mds */ + kick_requests(mdsc, i, 1); + } else if (oldstate == newstate) { + continue; /* nothing new with this mds */ + } + + /* + * send reconnect? + */ + if (s->s_state == CEPH_MDS_SESSION_RESTARTING && + newstate >= CEPH_MDS_STATE_RECONNECT) + send_mds_reconnect(mdsc, i); + + /* + * kick requests on any mds that has gone active. + * + * kick requests on cur or forwarder: we may have sent + * the request to mds1, mds1 told us it forwarded it + * to mds2, but then we learn mds1 failed and can't be + * sure it successfully forwarded our request before + * it died. 
+ */ + if (oldstate < CEPH_MDS_STATE_ACTIVE && + newstate >= CEPH_MDS_STATE_ACTIVE) { + kick_requests(mdsc, i, 1); + ceph_kick_flushing_caps(mdsc, s); + } + } +} + + + +/* + * leases + */ + +/* + * caller must hold session s_mutex, dentry->d_lock + */ +void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry) +{ + struct ceph_dentry_info *di = ceph_dentry(dentry); + + ceph_put_mds_session(di->lease_session); + di->lease_session = NULL; +} + +static void handle_lease(struct ceph_mds_client *mdsc, struct ceph_msg *msg) +{ + struct super_block *sb = mdsc->client->sb; + struct inode *inode; + struct ceph_mds_session *session; + struct ceph_inode_info *ci; + struct dentry *parent, *dentry; + struct ceph_dentry_info *di; + int mds; + struct ceph_mds_lease *h = msg->front.iov_base; + struct ceph_vino vino; + int mask; + struct qstr dname; + int release = 0; + + if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS) + return; + mds = le64_to_cpu(msg->hdr.src.name.num); + dout("handle_lease from mds%d\n", mds); + + /* decode */ + if (msg->front.iov_len < sizeof(*h) + sizeof(u32)) + goto bad; + vino.ino = le64_to_cpu(h->ino); + vino.snap = CEPH_NOSNAP; + mask = le16_to_cpu(h->mask); + dname.name = (void *)h + sizeof(*h) + sizeof(u32); + dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32); + if (dname.len != get_unaligned_le32(h+1)) + goto bad; + + /* find session */ + mutex_lock(&mdsc->mutex); + session = __ceph_lookup_mds_session(mdsc, mds); + mutex_unlock(&mdsc->mutex); + if (!session) { + pr_err("handle_lease got lease but no session mds%d\n", mds); + return; + } + + mutex_lock(&session->s_mutex); + session->s_seq++; + + /* lookup inode */ + inode = ceph_find_inode(sb, vino); + dout("handle_lease '%s', mask %d, ino %llx %p\n", + ceph_lease_op_name(h->action), mask, vino.ino, inode); + if (inode == NULL) { + dout("handle_lease no inode %llx\n", vino.ino); + goto release; + } + ci = ceph_inode(inode); + + /* dentry */ + parent = d_find_alias(inode); + if (!parent) { + dout("no parent dentry on inode %p\n", inode); + WARN_ON(1); + goto release; /* hrm... 
*/ + } + dname.hash = full_name_hash(dname.name, dname.len); + dentry = d_lookup(parent, &dname); + dput(parent); + if (!dentry) + goto release; + + spin_lock(&dentry->d_lock); + di = ceph_dentry(dentry); + switch (h->action) { + case CEPH_MDS_LEASE_REVOKE: + if (di && di->lease_session == session) { + h->seq = cpu_to_le32(di->lease_seq); + __ceph_mdsc_drop_dentry_lease(dentry); + } + release = 1; + break; + + case CEPH_MDS_LEASE_RENEW: + if (di && di->lease_session == session && + di->lease_gen == session->s_cap_gen && + di->lease_renew_from && + di->lease_renew_after == 0) { + unsigned long duration = + le32_to_cpu(h->duration_ms) * HZ / 1000; + + di->lease_seq = le32_to_cpu(h->seq); + dentry->d_time = di->lease_renew_from + duration; + di->lease_renew_after = di->lease_renew_from + + (duration >> 1); + di->lease_renew_from = 0; + } + break; + } + spin_unlock(&dentry->d_lock); + dput(dentry); + + if (!release) + goto out; + +release: + /* let's just reuse the same message */ + h->action = CEPH_MDS_LEASE_REVOKE_ACK; + ceph_msg_get(msg); + ceph_con_send(&session->s_con, msg); + +out: + iput(inode); + mutex_unlock(&session->s_mutex); + ceph_put_mds_session(session); + return; + +bad: + pr_err("corrupt lease message\n"); +} + +void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, + struct inode *inode, + struct dentry *dentry, char action, + u32 seq) +{ + struct ceph_msg *msg; + struct ceph_mds_lease *lease; + int len = sizeof(*lease) + sizeof(u32); + int dnamelen = 0; + + dout("lease_send_msg inode %p dentry %p %s to mds%d\n", + inode, dentry, ceph_lease_op_name(action), session->s_mds); + dnamelen = dentry->d_name.len; + len += dnamelen; + + msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, 0, 0, NULL); + if (IS_ERR(msg)) + return; + lease = msg->front.iov_base; + lease->action = action; + lease->mask = cpu_to_le16(CEPH_LOCK_DN); + lease->ino = cpu_to_le64(ceph_vino(inode).ino); + lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap); + lease->seq = cpu_to_le32(seq); + put_unaligned_le32(dnamelen, lease + 1); + memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen); + + /* + * if this is a preemptive lease RELEASE, no need to + * flush request stream, since the actual request will + * soon follow. + */ + msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE); + + ceph_con_send(&session->s_con, msg); +} + +/* + * Preemptively release a lease we expect to invalidate anyway. + * Pass @inode always, @dentry is optional. + */ +void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode, + struct dentry *dentry, int mask) +{ + struct ceph_dentry_info *di; + struct ceph_mds_session *session; + u32 seq; + + BUG_ON(inode == NULL); + BUG_ON(dentry == NULL); + BUG_ON(mask != CEPH_LOCK_DN); + + /* is dentry lease valid? 
*/ + spin_lock(&dentry->d_lock); + di = ceph_dentry(dentry); + if (!di || !di->lease_session || + di->lease_session->s_mds < 0 || + di->lease_gen != di->lease_session->s_cap_gen || + !time_before(jiffies, dentry->d_time)) { + dout("lease_release inode %p dentry %p -- " + "no lease on %d\n", + inode, dentry, mask); + spin_unlock(&dentry->d_lock); + return; + } + + /* we do have a lease on this dentry; note mds and seq */ + session = ceph_get_mds_session(di->lease_session); + seq = di->lease_seq; + __ceph_mdsc_drop_dentry_lease(dentry); + spin_unlock(&dentry->d_lock); + + dout("lease_release inode %p dentry %p mask %d to mds%d\n", + inode, dentry, mask, session->s_mds); + ceph_mdsc_lease_send_msg(session, inode, dentry, + CEPH_MDS_LEASE_RELEASE, seq); + ceph_put_mds_session(session); +} + +/* + * drop all leases (and dentry refs) in preparation for umount + */ +static void drop_leases(struct ceph_mds_client *mdsc) +{ + int i; + + dout("drop_leases\n"); + mutex_lock(&mdsc->mutex); + for (i = 0; i < mdsc->max_sessions; i++) { + struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); + if (!s) + continue; + mutex_unlock(&mdsc->mutex); + mutex_lock(&s->s_mutex); + mutex_unlock(&s->s_mutex); + ceph_put_mds_session(s); + mutex_lock(&mdsc->mutex); + } + mutex_unlock(&mdsc->mutex); +} + + + +/* + * delayed work -- periodically trim expired leases, renew caps with mds + */ +static void schedule_delayed(struct ceph_mds_client *mdsc) +{ + int delay = 5; + unsigned hz = round_jiffies_relative(HZ * delay); + schedule_delayed_work(&mdsc->delayed_work, hz); +} + +static void delayed_work(struct work_struct *work) +{ + int i; + struct ceph_mds_client *mdsc = + container_of(work, struct ceph_mds_client, delayed_work.work); + int renew_interval; + int renew_caps; + + dout("mdsc delayed_work\n"); + ceph_check_delayed_caps(mdsc, 0); + + mutex_lock(&mdsc->mutex); + renew_interval = mdsc->mdsmap->m_session_timeout >> 2; + renew_caps = time_after_eq(jiffies, HZ*renew_interval + + mdsc->last_renew_caps); + if (renew_caps) + mdsc->last_renew_caps = jiffies; + + for (i = 0; i < mdsc->max_sessions; i++) { + struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); + if (s == NULL) + continue; + if (s->s_state == CEPH_MDS_SESSION_CLOSING) { + dout("resending session close request for mds%d\n", + s->s_mds); + request_close_session(mdsc, s); + ceph_put_mds_session(s); + continue; + } + if (s->s_ttl && time_after(jiffies, s->s_ttl)) { + if (s->s_state == CEPH_MDS_SESSION_OPEN) { + s->s_state = CEPH_MDS_SESSION_HUNG; + pr_info("mds%d hung\n", s->s_mds); + } + } + if (s->s_state < CEPH_MDS_SESSION_OPEN) { + /* this mds is failed or recovering, just wait */ + ceph_put_mds_session(s); + continue; + } + mutex_unlock(&mdsc->mutex); + + mutex_lock(&s->s_mutex); + if (renew_caps) + send_renew_caps(mdsc, s); + else + ceph_con_keepalive(&s->s_con); + add_cap_releases(mdsc, s, -1); + send_cap_releases(mdsc, s); + mutex_unlock(&s->s_mutex); + ceph_put_mds_session(s); + + mutex_lock(&mdsc->mutex); + } + mutex_unlock(&mdsc->mutex); + + schedule_delayed(mdsc); +} + + +void ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client) +{ + mdsc->client = client; + mutex_init(&mdsc->mutex); + mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); + init_completion(&mdsc->safe_umount_waiters); + init_completion(&mdsc->session_close_waiters); + INIT_LIST_HEAD(&mdsc->waiting_for_map); + mdsc->sessions = NULL; + mdsc->max_sessions = 0; + mdsc->stopping = 0; + init_rwsem(&mdsc->snap_rwsem); + 
INIT_RADIX_TREE(&mdsc->snap_realms, GFP_NOFS); + INIT_LIST_HEAD(&mdsc->snap_empty); + spin_lock_init(&mdsc->snap_empty_lock); + mdsc->last_tid = 0; + INIT_RADIX_TREE(&mdsc->request_tree, GFP_NOFS); + INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work); + mdsc->last_renew_caps = jiffies; + INIT_LIST_HEAD(&mdsc->cap_delay_list); + spin_lock_init(&mdsc->cap_delay_lock); + INIT_LIST_HEAD(&mdsc->snap_flush_list); + spin_lock_init(&mdsc->snap_flush_lock); + mdsc->cap_flush_seq = 0; + INIT_LIST_HEAD(&mdsc->cap_dirty); + mdsc->num_cap_flushing = 0; + spin_lock_init(&mdsc->cap_dirty_lock); + init_waitqueue_head(&mdsc->cap_flushing_wq); + spin_lock_init(&mdsc->dentry_lru_lock); + INIT_LIST_HEAD(&mdsc->dentry_lru); +} + +/* + * Wait for safe replies on open mds requests. If we time out, drop + * all requests from the tree to avoid dangling dentry refs. + */ +static void wait_requests(struct ceph_mds_client *mdsc) +{ + struct ceph_mds_request *req; + struct ceph_client *client = mdsc->client; + + mutex_lock(&mdsc->mutex); + if (__get_oldest_tid(mdsc)) { + mutex_unlock(&mdsc->mutex); + dout("wait_requests waiting for requests\n"); + wait_for_completion_timeout(&mdsc->safe_umount_waiters, + client->mount_args.mount_timeout * HZ); + mutex_lock(&mdsc->mutex); + + /* tear down remaining requests */ + while (radix_tree_gang_lookup(&mdsc->request_tree, + (void **)&req, 0, 1)) { + dout("wait_requests timed out on tid %llu\n", + req->r_tid); + radix_tree_delete(&mdsc->request_tree, req->r_tid); + ceph_mdsc_put_request(req); + } + } + mutex_unlock(&mdsc->mutex); + dout("wait_requests done\n"); +} + +/* + * called before mount is ro, and before dentries are torn down. + * (hmm, does this still race with new lookups?) + */ +void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) +{ + dout("pre_umount\n"); + mdsc->stopping = 1; + + drop_leases(mdsc); + ceph_check_delayed_caps(mdsc, 1); + wait_requests(mdsc); +} + +/* + * wait for all write mds requests to flush. + */ +static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid) +{ + struct ceph_mds_request *req; + u64 next_tid = 0; + int got; + + mutex_lock(&mdsc->mutex); + dout("wait_unsafe_requests want %lld\n", want_tid); + while (1) { + got = radix_tree_gang_lookup(&mdsc->request_tree, (void **)&req, + next_tid, 1); + if (!got) + break; + if (req->r_tid > want_tid) + break; + + next_tid = req->r_tid + 1; + if ((req->r_op & CEPH_MDS_OP_WRITE) == 0) + continue; /* not a write op */ + + ceph_mdsc_get_request(req); + mutex_unlock(&mdsc->mutex); + dout("wait_unsafe_requests wait on %llu (want %llu)\n", + req->r_tid, want_tid); + wait_for_completion(&req->r_safe_completion); + mutex_lock(&mdsc->mutex); + ceph_mdsc_put_request(req); + } + mutex_unlock(&mdsc->mutex); + dout("wait_unsafe_requests done\n"); +} + +void ceph_mdsc_sync(struct ceph_mds_client *mdsc) +{ + u64 want_tid, want_flush; + + dout("sync\n"); + mutex_lock(&mdsc->mutex); + want_tid = mdsc->last_tid; + want_flush = mdsc->cap_flush_seq; + mutex_unlock(&mdsc->mutex); + dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush); + + ceph_check_delayed_caps(mdsc, 1); + + wait_unsafe_requests(mdsc, want_tid); + wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush)); +} + + +/* + * called after sb is ro. 
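+ * Ask each mds to close its session, waiting up to the mount timeout
+ * for the CLOSE acks; any sessions still open after that are torn down
+ * along with their caps.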
+ */ +void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) +{ + struct ceph_mds_session *session; + int i; + int n; + struct ceph_client *client = mdsc->client; + unsigned long started, timeout = client->mount_args.mount_timeout * HZ; + + dout("close_sessions\n"); + + mutex_lock(&mdsc->mutex); + + /* close sessions */ + started = jiffies; + while (time_before(jiffies, started + timeout)) { + dout("closing sessions\n"); + n = 0; + for (i = 0; i < mdsc->max_sessions; i++) { + session = __ceph_lookup_mds_session(mdsc, i); + if (!session) + continue; + mutex_unlock(&mdsc->mutex); + mutex_lock(&session->s_mutex); + __close_session(mdsc, session); + mutex_unlock(&session->s_mutex); + ceph_put_mds_session(session); + mutex_lock(&mdsc->mutex); + n++; + } + if (n == 0) + break; + + if (client->mount_state == CEPH_MOUNT_SHUTDOWN) + break; + + dout("waiting for sessions to close\n"); + mutex_unlock(&mdsc->mutex); + wait_for_completion_timeout(&mdsc->session_close_waiters, + timeout); + mutex_lock(&mdsc->mutex); + } + + /* tear down remaining sessions */ + for (i = 0; i < mdsc->max_sessions; i++) { + if (mdsc->sessions[i]) { + session = get_session(mdsc->sessions[i]); + unregister_session(mdsc, i); + mutex_unlock(&mdsc->mutex); + mutex_lock(&session->s_mutex); + remove_session_caps(session); + mutex_unlock(&session->s_mutex); + ceph_put_mds_session(session); + mutex_lock(&mdsc->mutex); + } + } + + WARN_ON(!list_empty(&mdsc->cap_delay_list)); + + mutex_unlock(&mdsc->mutex); + + ceph_cleanup_empty_realms(mdsc); + + cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ + + dout("stopped\n"); +} + +void ceph_mdsc_stop(struct ceph_mds_client *mdsc) +{ + dout("stop\n"); + cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ + if (mdsc->mdsmap) + ceph_mdsmap_destroy(mdsc->mdsmap); + kfree(mdsc->sessions); +} + + +/* + * handle mds map update. + */ +void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) +{ + u32 epoch; + u32 maplen; + void *p = msg->front.iov_base; + void *end = p + msg->front.iov_len; + struct ceph_mdsmap *newmap, *oldmap; + struct ceph_fsid fsid; + int err = -EINVAL; + + ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); + ceph_decode_copy(&p, &fsid, sizeof(fsid)); + if (ceph_fsid_compare(&fsid, &mdsc->client->monc.monmap->fsid)) { + pr_err("got mdsmap with wrong fsid\n"); + return; + } + ceph_decode_32(&p, epoch); + ceph_decode_32(&p, maplen); + dout("handle_map epoch %u len %d\n", epoch, (int)maplen); + + /* do we need it? 
*/ + ceph_monc_got_mdsmap(&mdsc->client->monc, epoch); + mutex_lock(&mdsc->mutex); + if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { + dout("handle_map epoch %u <= our %u\n", + epoch, mdsc->mdsmap->m_epoch); + mutex_unlock(&mdsc->mutex); + return; + } + + newmap = ceph_mdsmap_decode(&p, end); + if (IS_ERR(newmap)) { + err = PTR_ERR(newmap); + goto bad_unlock; + } + + /* swap into place */ + if (mdsc->mdsmap) { + oldmap = mdsc->mdsmap; + mdsc->mdsmap = newmap; + check_new_map(mdsc, newmap, oldmap); + ceph_mdsmap_destroy(oldmap); + } else { + mdsc->mdsmap = newmap; /* first mds map */ + } + mdsc->client->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; + + __wake_requests(mdsc, &mdsc->waiting_for_map); + + mutex_unlock(&mdsc->mutex); + schedule_delayed(mdsc); + return; + +bad_unlock: + mutex_unlock(&mdsc->mutex); +bad: + pr_err("error decoding mdsmap %d\n", err); + return; +} + +static struct ceph_connection *con_get(struct ceph_connection *con) +{ + struct ceph_mds_session *s = con->private; + + if (get_session(s)) { + dout("mdsc con_get %p %d -> %d\n", s, + atomic_read(&s->s_ref) - 1, atomic_read(&s->s_ref)); + return con; + } + dout("mdsc con_get %p FAIL\n", s); + return NULL; +} + +static void con_put(struct ceph_connection *con) +{ + struct ceph_mds_session *s = con->private; + + dout("mdsc con_put %p %d -> %d\n", s, atomic_read(&s->s_ref), + atomic_read(&s->s_ref) - 1); + ceph_put_mds_session(s); +} + +/* + * if the client is unresponsive for long enough, the mds will kill + * the session entirely. + */ +static void peer_reset(struct ceph_connection *con) +{ + struct ceph_mds_session *s = con->private; + + pr_err("mds%d gave us the boot. IMPLEMENT RECONNECT.\n", + s->s_mds); +} + +static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) +{ + struct ceph_mds_session *s = con->private; + struct ceph_mds_client *mdsc = s->s_mdsc; + int type = le16_to_cpu(msg->hdr.type); + + switch (type) { + case CEPH_MSG_MDS_MAP: + ceph_mdsc_handle_map(mdsc, msg); + break; + case CEPH_MSG_CLIENT_SESSION: + handle_session(s, msg); + break; + case CEPH_MSG_CLIENT_REPLY: + handle_reply(s, msg); + break; + case CEPH_MSG_CLIENT_REQUEST_FORWARD: + handle_forward(mdsc, msg); + break; + case CEPH_MSG_CLIENT_CAPS: + ceph_handle_caps(s, msg); + break; + case CEPH_MSG_CLIENT_SNAP: + ceph_handle_snap(mdsc, msg); + break; + case CEPH_MSG_CLIENT_LEASE: + handle_lease(mdsc, msg); + break; + + default: + pr_err("received unknown message type %d %s\n", type, + ceph_msg_type_name(type)); + } + ceph_msg_put(msg); +} + +const static struct ceph_connection_operations mds_con_ops = { + .get = con_get, + .put = con_put, + .dispatch = dispatch, + .peer_reset = peer_reset, + .alloc_msg = ceph_alloc_msg, + .alloc_middle = ceph_alloc_middle, +}; + + + + +/* eof */ diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h new file mode 100644 index 000000000000..f566e9c84295 --- /dev/null +++ b/fs/ceph/mds_client.h @@ -0,0 +1,321 @@ +#ifndef _FS_CEPH_MDS_CLIENT_H +#define _FS_CEPH_MDS_CLIENT_H + +#include +#include +#include +#include +#include + +#include "types.h" +#include "messenger.h" +#include "mdsmap.h" + +/* + * Some lock dependencies: + * + * session->s_mutex + * mdsc->mutex + * + * mdsc->snap_rwsem + * + * inode->i_lock + * mdsc->snap_flush_lock + * mdsc->cap_delay_lock + * + */ + +struct ceph_client; +struct ceph_cap; + +/* + * parsed info about a single inode. pointers are into the encoded + * on-wire structures within the mds reply message payload. 
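+ * (the reply message must therefore stay referenced for as long as
+ * this parsed info is in use)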
+ */ +struct ceph_mds_reply_info_in { + struct ceph_mds_reply_inode *in; + u32 symlink_len; + char *symlink; + u32 xattr_len; + char *xattr_data; +}; + +/* + * parsed info about an mds reply, including information about the + * target inode and/or its parent directory and dentry, and directory + * contents (for readdir results). + */ +struct ceph_mds_reply_info_parsed { + struct ceph_mds_reply_head *head; + + struct ceph_mds_reply_info_in diri, targeti; + struct ceph_mds_reply_dirfrag *dirfrag; + char *dname; + u32 dname_len; + struct ceph_mds_reply_lease *dlease; + + struct ceph_mds_reply_dirfrag *dir_dir; + int dir_nr; + char **dir_dname; + u32 *dir_dname_len; + struct ceph_mds_reply_lease **dir_dlease; + struct ceph_mds_reply_info_in *dir_in; + u8 dir_complete, dir_end; + + /* encoded blob describing snapshot contexts for certain + operations (e.g., open) */ + void *snapblob; + int snapblob_len; +}; + + +/* + * cap releases are batched and sent to the MDS en masse. + */ +#define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE - \ + sizeof(struct ceph_mds_cap_release)) / \ + sizeof(struct ceph_mds_cap_item)) + + +/* + * state associated with each MDS<->client session + */ +enum { + CEPH_MDS_SESSION_NEW = 1, + CEPH_MDS_SESSION_OPENING = 2, + CEPH_MDS_SESSION_OPEN = 3, + CEPH_MDS_SESSION_HUNG = 4, + CEPH_MDS_SESSION_CLOSING = 5, + CEPH_MDS_SESSION_RESTARTING = 6, + CEPH_MDS_SESSION_RECONNECTING = 7, +}; + +struct ceph_mds_session { + struct ceph_mds_client *s_mdsc; + int s_mds; + int s_state; + unsigned long s_ttl; /* time until mds kills us */ + u64 s_seq; /* incoming msg seq # */ + struct mutex s_mutex; /* serialize session messages */ + + struct ceph_connection s_con; + + /* protected by s_cap_lock */ + spinlock_t s_cap_lock; + u32 s_cap_gen; /* inc each time we get mds stale msg */ + unsigned long s_cap_ttl; /* when session caps expire */ + struct list_head s_caps; /* all caps issued by this session */ + int s_nr_caps, s_trim_caps; + int s_num_cap_releases; + struct list_head s_cap_releases; /* waiting cap_release messages */ + struct list_head s_cap_releases_done; /* ready to send */ + + /* protected by mutex */ + struct list_head s_cap_flushing; /* inodes w/ flushing caps */ + struct list_head s_cap_snaps_flushing; + unsigned long s_renew_requested; /* last time we sent a renew req */ + u64 s_renew_seq; + + atomic_t s_ref; + struct list_head s_waiting; /* waiting requests */ + struct list_head s_unsafe; /* unsafe requests */ +}; + +/* + * modes of choosing which MDS to send a request to + */ +enum { + USE_ANY_MDS, + USE_RANDOM_MDS, + USE_AUTH_MDS, /* prefer authoritative mds for this metadata item */ +}; + +struct ceph_mds_request; +struct ceph_mds_client; + +/* + * request completion callback + */ +typedef void (*ceph_mds_request_callback_t) (struct ceph_mds_client *mdsc, + struct ceph_mds_request *req); + +/* + * an in-flight mds request + */ +struct ceph_mds_request { + u64 r_tid; /* transaction id */ + + int r_op; /* mds op code */ + int r_mds; + + /* operation on what? 
*/ + struct inode *r_inode; /* arg1 */ + struct dentry *r_dentry; /* arg1 */ + struct dentry *r_old_dentry; /* arg2: rename from or link from */ + char *r_path1, *r_path2; + struct ceph_vino r_ino1, r_ino2; + + struct inode *r_locked_dir; /* dir (if any) i_mutex locked by vfs */ + struct inode *r_target_inode; /* resulting inode */ + + union ceph_mds_request_args r_args; + int r_fmode; /* file mode, if expecting cap */ + + /* for choosing which mds to send this request to */ + int r_direct_mode; + u32 r_direct_hash; /* choose dir frag based on this dentry hash */ + bool r_direct_is_hash; /* true if r_direct_hash is valid */ + + /* data payload is used for xattr ops */ + struct page **r_pages; + int r_num_pages; + int r_data_len; + + /* what caps shall we drop? */ + int r_inode_drop, r_inode_unless; + int r_dentry_drop, r_dentry_unless; + int r_old_dentry_drop, r_old_dentry_unless; + struct inode *r_old_inode; + int r_old_inode_drop, r_old_inode_unless; + + struct ceph_msg *r_request; /* original request */ + struct ceph_msg *r_reply; + struct ceph_mds_reply_info_parsed r_reply_info; + int r_err; + + unsigned long r_timeout; /* optional. jiffies */ + unsigned long r_started; /* start time to measure timeout against */ + unsigned long r_request_started; /* start time for mds request only, + used to measure lease durations */ + + /* link unsafe requests to parent directory, for fsync */ + struct inode *r_unsafe_dir; + struct list_head r_unsafe_dir_item; + + struct ceph_mds_session *r_session; + + int r_attempts; /* resend attempts */ + int r_num_fwd; /* number of forward attempts */ + int r_num_stale; + int r_resend_mds; /* mds to resend to next, if any*/ + + atomic_t r_ref; + struct list_head r_wait; + struct completion r_completion; + struct completion r_safe_completion; + ceph_mds_request_callback_t r_callback; + struct list_head r_unsafe_item; /* per-session unsafe list item */ + bool r_got_unsafe, r_got_safe; + + bool r_did_prepopulate; + u32 r_readdir_offset; + + struct ceph_cap_reservation r_caps_reservation; + int r_num_caps; +}; + +/* + * mds client state + */ +struct ceph_mds_client { + struct ceph_client *client; + struct mutex mutex; /* all nested structures */ + + struct ceph_mdsmap *mdsmap; + struct completion safe_umount_waiters, session_close_waiters; + struct list_head waiting_for_map; + + struct ceph_mds_session **sessions; /* NULL for mds if no session */ + int max_sessions; /* len of s_mds_sessions */ + int stopping; /* true if shutting down */ + + /* + * snap_rwsem will cover cap linkage into snaprealms, and + * realm snap contexts. (later, we can do per-realm snap + * contexts locks..) the empty list contains realms with no + * references (implying they contain no inodes with caps) that + * should be destroyed. 
+ */ + struct rw_semaphore snap_rwsem; + struct radix_tree_root snap_realms; + struct list_head snap_empty; + spinlock_t snap_empty_lock; /* protect snap_empty */ + + u64 last_tid; /* most recent mds request */ + struct radix_tree_root request_tree; /* pending mds requests */ + struct delayed_work delayed_work; /* delayed work */ + unsigned long last_renew_caps; /* last time we renewed our caps */ + struct list_head cap_delay_list; /* caps with delayed release */ + spinlock_t cap_delay_lock; /* protects cap_delay_list */ + struct list_head snap_flush_list; /* cap_snaps ready to flush */ + spinlock_t snap_flush_lock; + + u64 cap_flush_seq; + struct list_head cap_dirty; /* inodes with dirty caps */ + int num_cap_flushing; /* # caps we are flushing */ + spinlock_t cap_dirty_lock; /* protects above items */ + wait_queue_head_t cap_flushing_wq; + + struct dentry *debugfs_file; + + spinlock_t dentry_lru_lock; + struct list_head dentry_lru; + int num_dentry; +}; + +extern const char *ceph_mds_op_name(int op); + +extern struct ceph_mds_session * +__ceph_lookup_mds_session(struct ceph_mds_client *, int mds); + +static inline struct ceph_mds_session * +ceph_get_mds_session(struct ceph_mds_session *s) +{ + atomic_inc(&s->s_ref); + return s; +} + +extern void ceph_put_mds_session(struct ceph_mds_session *s); + +extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc, + struct ceph_msg *msg, int mds); + +extern void ceph_mdsc_init(struct ceph_mds_client *mdsc, + struct ceph_client *client); +extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc); +extern void ceph_mdsc_stop(struct ceph_mds_client *mdsc); + +extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc); + +extern void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, + struct inode *inode, + struct dentry *dn, int mask); + +extern struct ceph_mds_request * +ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode); +extern void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req); +extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, + struct inode *dir, + struct ceph_mds_request *req); +static inline void ceph_mdsc_get_request(struct ceph_mds_request *req) +{ + atomic_inc(&req->r_ref); +} +extern void ceph_mdsc_put_request(struct ceph_mds_request *req); + +extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc); + +extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, + int stop_on_nosnap); + +extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry); +extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, + struct inode *inode, + struct dentry *dentry, char action, + u32 seq); + +extern void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, + struct ceph_msg *msg); + +#endif diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c new file mode 100644 index 000000000000..15913cbeb289 --- /dev/null +++ b/fs/ceph/mdsmap.c @@ -0,0 +1,166 @@ +#include "ceph_debug.h" + +#include +#include +#include +#include +#include + +#include "mdsmap.h" +#include "messenger.h" +#include "decode.h" + +#include "super.h" + + +/* + * choose a random mds that is "up" (i.e. has a state > 0), or -1. 
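+ *
+ * This makes two passes: the first counts how many MDSs are up, then a
+ * single random byte (mod that count) selects which of them to return.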
+ */ +int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m) +{ + int n = 0; + int i; + char r; + + /* count */ + for (i = 0; i < m->m_max_mds; i++) + if (m->m_info[i].state > 0) + n++; + if (n == 0) + return -1; + + /* pick */ + get_random_bytes(&r, 1); + n = r % n; + i = 0; + for (i = 0; n > 0; i++, n--) + while (m->m_info[i].state <= 0) + i++; + + return i; +} + +/* + * Decode an MDS map + * + * Ignore any fields we don't care about (there are quite a few of + * them). + */ +struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) +{ + struct ceph_mdsmap *m; + int i, j, n; + int err = -EINVAL; + u16 version; + + m = kzalloc(sizeof(*m), GFP_NOFS); + if (m == NULL) + return ERR_PTR(-ENOMEM); + + ceph_decode_16_safe(p, end, version, bad); + + ceph_decode_need(p, end, 8*sizeof(u32) + sizeof(u64), bad); + ceph_decode_32(p, m->m_epoch); + ceph_decode_32(p, m->m_client_epoch); + ceph_decode_32(p, m->m_last_failure); + ceph_decode_32(p, m->m_root); + ceph_decode_32(p, m->m_session_timeout); + ceph_decode_32(p, m->m_session_autoclose); + ceph_decode_64(p, m->m_max_file_size); + ceph_decode_32(p, m->m_max_mds); + + m->m_info = kcalloc(m->m_max_mds, sizeof(*m->m_info), GFP_NOFS); + if (m->m_info == NULL) + goto badmem; + + /* pick out active nodes from mds_info (state > 0) */ + ceph_decode_32(p, n); + for (i = 0; i < n; i++) { + u32 namelen; + s32 mds, inc, state; + u64 state_seq; + u8 infoversion; + struct ceph_entity_addr addr; + u32 num_export_targets; + void *pexport_targets = NULL; + + ceph_decode_need(p, end, sizeof(addr) + 1 + sizeof(u32), bad); + *p += sizeof(addr); /* skip addr key */ + ceph_decode_8(p, infoversion); + ceph_decode_32(p, namelen); /* skip mds name */ + *p += namelen; + + ceph_decode_need(p, end, + 5*sizeof(u32) + sizeof(u64) + + sizeof(addr) + sizeof(struct ceph_timespec), + bad); + ceph_decode_32(p, mds); + ceph_decode_32(p, inc); + ceph_decode_32(p, state); + ceph_decode_64(p, state_seq); + ceph_decode_copy(p, &addr, sizeof(addr)); + *p += sizeof(struct ceph_timespec); + *p += sizeof(u32); + ceph_decode_32_safe(p, end, namelen, bad); + *p += sizeof(namelen); + if (infoversion >= 2) { + ceph_decode_32_safe(p, end, num_export_targets, bad); + pexport_targets = *p; + *p += sizeof(num_export_targets * sizeof(u32)); + } else { + num_export_targets = 0; + } + + dout("mdsmap_decode %d/%d mds%d.%d %s %s\n", + i+1, n, mds, inc, pr_addr(&addr.in_addr), + ceph_mds_state_name(state)); + if (mds >= 0 && mds < m->m_max_mds && state > 0) { + m->m_info[mds].state = state; + m->m_info[mds].addr = addr; + m->m_info[mds].num_export_targets = num_export_targets; + if (num_export_targets) { + m->m_info[mds].export_targets = + kcalloc(num_export_targets, sizeof(u32), + GFP_NOFS); + for (j = 0; j < num_export_targets; j++) + ceph_decode_32(&pexport_targets, + m->m_info[mds].export_targets[j]); + } else { + m->m_info[mds].export_targets = NULL; + } + } + } + + /* pg_pools */ + ceph_decode_32_safe(p, end, n, bad); + m->m_num_data_pg_pools = n; + m->m_data_pg_pools = kcalloc(n, sizeof(u32), GFP_NOFS); + if (!m->m_data_pg_pools) + goto badmem; + ceph_decode_need(p, end, sizeof(u32)*(n+1), bad); + for (i = 0; i < n; i++) + ceph_decode_32(p, m->m_data_pg_pools[i]); + ceph_decode_32(p, m->m_cas_pg_pool); + + /* ok, we don't care about the rest. 
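+	 * (any trailing fields in the encoding are simply left undecoded)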
*/ + dout("mdsmap_decode success epoch %u\n", m->m_epoch); + return m; + +badmem: + err = -ENOMEM; +bad: + pr_err("corrupt mdsmap\n"); + ceph_mdsmap_destroy(m); + return ERR_PTR(-EINVAL); +} + +void ceph_mdsmap_destroy(struct ceph_mdsmap *m) +{ + int i; + + for (i = 0; i < m->m_max_mds; i++) + kfree(m->m_info[i].export_targets); + kfree(m->m_info); + kfree(m->m_data_pg_pools); + kfree(m); +} diff --git a/fs/ceph/mdsmap.h b/fs/ceph/mdsmap.h new file mode 100644 index 000000000000..d317308648fb --- /dev/null +++ b/fs/ceph/mdsmap.h @@ -0,0 +1,53 @@ +#ifndef _FS_CEPH_MDSMAP_H +#define _FS_CEPH_MDSMAP_H + +#include "types.h" + +/* + * mds map - describe servers in the mds cluster. + * + * we limit fields to those the client actually xcares about + */ +struct ceph_mds_info { + struct ceph_entity_addr addr; + s32 state; + int num_export_targets; + u32 *export_targets; +}; + +struct ceph_mdsmap { + u32 m_epoch, m_client_epoch, m_last_failure; + u32 m_root; + u32 m_session_timeout; /* seconds */ + u32 m_session_autoclose; /* seconds */ + u64 m_max_file_size; + u32 m_max_mds; /* size of m_addr, m_state arrays */ + struct ceph_mds_info *m_info; + + /* which object pools file data can be stored in */ + int m_num_data_pg_pools; + u32 *m_data_pg_pools; + u32 m_cas_pg_pool; +}; + +static inline struct ceph_entity_addr * +ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w) +{ + if (w >= m->m_max_mds) + return NULL; + return &m->m_info[w].addr; +} + +static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w) +{ + BUG_ON(w < 0); + if (w >= m->m_max_mds) + return CEPH_MDS_STATE_DNE; + return m->m_info[w].state; +} + +extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m); +extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end); +extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m); + +#endif -- cgit v1.2.3 From f24e9980eb860d8600cbe5ef3d2fd9295320d229 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:10 -0700 Subject: ceph: OSD client The OSD client is responsible for reading and writing data from/to the object storage pool. This includes determining where objects are stored in the cluster, and ensuring that requests are retried or redirected in the event of a node failure or data migration. If an OSD does not respond before a timeout expires, keepalive messages are sent across the lossless, ordered communications channel to ensure that any break in the TCP is discovered. If the session does reset, a reconnection is attempted and affected requests are resent (by the message transport layer). Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 1294 ++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/osd_client.h | 144 ++++++ fs/ceph/osdmap.c | 875 ++++++++++++++++++++++++++++++++++ fs/ceph/osdmap.h | 123 +++++ 4 files changed, 2436 insertions(+) create mode 100644 fs/ceph/osd_client.c create mode 100644 fs/ceph/osd_client.h create mode 100644 fs/ceph/osdmap.c create mode 100644 fs/ceph/osdmap.h diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c new file mode 100644 index 000000000000..978593a4f466 --- /dev/null +++ b/fs/ceph/osd_client.c @@ -0,0 +1,1294 @@ +#include "ceph_debug.h" + +#include +#include +#include +#include +#include +#include + +#include "super.h" +#include "osd_client.h" +#include "messenger.h" +#include "decode.h" + +const static struct ceph_connection_operations osd_con_ops; + +static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd); + +/* + * Implement client access to distributed object storage cluster. 
+ * + * All data objects are stored within a cluster/cloud of OSDs, or + * "object storage devices." (Note that Ceph OSDs have _nothing_ to + * do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply + * remote daemons serving up and coordinating consistent and safe + * access to storage. + * + * Cluster membership and the mapping of data objects onto storage devices + * are described by the osd map. + * + * We keep track of pending OSD requests (read, write), resubmit + * requests to different OSDs when the cluster topology/data layout + * change, or retry the affected requests when the communications + * channel with an OSD is reset. + */ + +/* + * calculate the mapping of a file extent onto an object, and fill out the + * request accordingly. shorten extent as necessary if it crosses an + * object boundary. + * + * fill osd op in request message. + */ +static void calc_layout(struct ceph_osd_client *osdc, + struct ceph_vino vino, struct ceph_file_layout *layout, + u64 off, u64 *plen, + struct ceph_osd_request *req) +{ + struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base; + struct ceph_osd_op *op = (void *)(reqhead + 1); + u64 orig_len = *plen; + u64 objoff, objlen; /* extent in object */ + u64 bno; + + reqhead->snapid = cpu_to_le64(vino.snap); + + /* object extent? */ + ceph_calc_file_object_mapping(layout, off, plen, &bno, + &objoff, &objlen); + if (*plen < orig_len) + dout(" skipping last %llu, final file extent %llu~%llu\n", + orig_len - *plen, off, *plen); + + sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno); + req->r_oid_len = strlen(req->r_oid); + + op->extent.offset = cpu_to_le64(objoff); + op->extent.length = cpu_to_le64(objlen); + req->r_num_pages = calc_pages_for(off, *plen); + + dout("calc_layout %s (%d) %llu~%llu (%d pages)\n", + req->r_oid, req->r_oid_len, objoff, objlen, req->r_num_pages); +} + + +/* + * requests + */ +void ceph_osdc_put_request(struct ceph_osd_request *req) +{ + dout("osdc put_request %p %d -> %d\n", req, atomic_read(&req->r_ref), + atomic_read(&req->r_ref)-1); + BUG_ON(atomic_read(&req->r_ref) <= 0); + if (atomic_dec_and_test(&req->r_ref)) { + if (req->r_request) + ceph_msg_put(req->r_request); + if (req->r_reply) + ceph_msg_put(req->r_reply); + if (req->r_own_pages) + ceph_release_page_vector(req->r_pages, + req->r_num_pages); + ceph_put_snap_context(req->r_snapc); + if (req->r_mempool) + mempool_free(req, req->r_osdc->req_mempool); + else + kfree(req); + } +} + +/* + * build new request AND message, calculate layout, and adjust file + * extent as needed. + * + * if the file was recently truncated, we include information about its + * old and new size so that the object can be updated appropriately. (we + * avoid synchronously deleting truncated objects because it's slow.) + * + * if @do_sync, include a 'startsync' command so that the osd will flush + * data quickly. 
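+ *
+ * The message front is laid out as: the request head, the osd op array
+ * (the read/write op plus optional truncate and startsync ops), the
+ * object name, and finally the snap ids from @snapc, if any.  Object
+ * names are built by calc_layout() as "<ino>.<blockno>" in hex, e.g.
+ * something like "10000000000.00000003".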
+ */ +struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, + struct ceph_file_layout *layout, + struct ceph_vino vino, + u64 off, u64 *plen, + int opcode, int flags, + struct ceph_snap_context *snapc, + int do_sync, + u32 truncate_seq, + u64 truncate_size, + struct timespec *mtime, + bool use_mempool, int num_reply) +{ + struct ceph_osd_request *req; + struct ceph_msg *msg; + struct ceph_osd_request_head *head; + struct ceph_osd_op *op; + void *p; + int do_trunc = truncate_seq && (off + *plen > truncate_size); + int num_op = 1 + do_sync + do_trunc; + size_t msg_size = sizeof(*head) + num_op*sizeof(*op); + int err, i; + u64 prevofs; + + if (use_mempool) { + req = mempool_alloc(osdc->req_mempool, GFP_NOFS); + memset(req, 0, sizeof(*req)); + } else { + req = kzalloc(sizeof(*req), GFP_NOFS); + } + if (req == NULL) + return ERR_PTR(-ENOMEM); + + err = ceph_msgpool_resv(&osdc->msgpool_op_reply, num_reply); + if (err) { + ceph_osdc_put_request(req); + return ERR_PTR(-ENOMEM); + } + + req->r_osdc = osdc; + req->r_mempool = use_mempool; + atomic_set(&req->r_ref, 1); + init_completion(&req->r_completion); + init_completion(&req->r_safe_completion); + INIT_LIST_HEAD(&req->r_unsafe_item); + req->r_flags = flags; + + WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0); + + /* create message; allow space for oid */ + msg_size += 40; + if (snapc) + msg_size += sizeof(u64) * snapc->num_snaps; + if (use_mempool) + msg = ceph_msgpool_get(&osdc->msgpool_op); + else + msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, 0, 0, NULL); + if (IS_ERR(msg)) { + ceph_msgpool_resv(&osdc->msgpool_op_reply, num_reply); + ceph_osdc_put_request(req); + return ERR_PTR(PTR_ERR(msg)); + } + msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP); + memset(msg->front.iov_base, 0, msg->front.iov_len); + head = msg->front.iov_base; + op = (void *)(head + 1); + p = (void *)(op + num_op); + + req->r_request = msg; + req->r_snapc = ceph_get_snap_context(snapc); + + head->client_inc = cpu_to_le32(1); /* always, for now. */ + head->flags = cpu_to_le32(flags); + if (flags & CEPH_OSD_FLAG_WRITE) + ceph_encode_timespec(&head->mtime, mtime); + head->num_ops = cpu_to_le16(num_op); + op->op = cpu_to_le16(opcode); + + /* calculate max write size */ + calc_layout(osdc, vino, layout, off, plen, req); + req->r_file_layout = *layout; /* keep a copy */ + + if (flags & CEPH_OSD_FLAG_WRITE) { + req->r_request->hdr.data_off = cpu_to_le16(off); + req->r_request->hdr.data_len = cpu_to_le32(*plen); + op->payload_len = cpu_to_le32(*plen); + } + + /* fill in oid */ + head->object_len = cpu_to_le32(req->r_oid_len); + memcpy(p, req->r_oid, req->r_oid_len); + p += req->r_oid_len; + + /* additional ops */ + if (do_trunc) { + op++; + op->op = cpu_to_le16(opcode == CEPH_OSD_OP_READ ? + CEPH_OSD_OP_MASKTRUNC : CEPH_OSD_OP_SETTRUNC); + op->trunc.truncate_seq = cpu_to_le32(truncate_seq); + prevofs = le64_to_cpu((op-1)->extent.offset); + op->trunc.truncate_size = cpu_to_le64(truncate_size - + (off-prevofs)); + } + if (do_sync) { + op++; + op->op = cpu_to_le16(CEPH_OSD_OP_STARTSYNC); + } + if (snapc) { + head->snap_seq = cpu_to_le64(snapc->seq); + head->num_snaps = cpu_to_le32(snapc->num_snaps); + for (i = 0; i < snapc->num_snaps; i++) { + put_unaligned_le64(snapc->snaps[i], p); + p += sizeof(u64); + } + } + + BUG_ON(p > msg->front.iov_base + msg->front.iov_len); + return req; +} + +/* + * We keep osd requests in an rbtree, sorted by ->r_tid. 
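+ *
+ * __lookup_request() finds an exact tid; __lookup_request_ge() returns
+ * the first request with a tid at or above the given value (used by
+ * ceph_osdc_sync() to iterate over in-flight requests).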
+ */ +static void __insert_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *new) +{ + struct rb_node **p = &osdc->requests.rb_node; + struct rb_node *parent = NULL; + struct ceph_osd_request *req = NULL; + + while (*p) { + parent = *p; + req = rb_entry(parent, struct ceph_osd_request, r_node); + if (new->r_tid < req->r_tid) + p = &(*p)->rb_left; + else if (new->r_tid > req->r_tid) + p = &(*p)->rb_right; + else + BUG(); + } + + rb_link_node(&new->r_node, parent, p); + rb_insert_color(&new->r_node, &osdc->requests); +} + +static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc, + u64 tid) +{ + struct ceph_osd_request *req; + struct rb_node *n = osdc->requests.rb_node; + + while (n) { + req = rb_entry(n, struct ceph_osd_request, r_node); + if (tid < req->r_tid) + n = n->rb_left; + else if (tid > req->r_tid) + n = n->rb_right; + else + return req; + } + return NULL; +} + +static struct ceph_osd_request * +__lookup_request_ge(struct ceph_osd_client *osdc, + u64 tid) +{ + struct ceph_osd_request *req; + struct rb_node *n = osdc->requests.rb_node; + + while (n) { + req = rb_entry(n, struct ceph_osd_request, r_node); + if (tid < req->r_tid) { + if (!n->rb_left) + return req; + n = n->rb_left; + } else if (tid > req->r_tid) { + n = n->rb_right; + } else { + return req; + } + } + return NULL; +} + + +/* + * The messaging layer will reconnect to the osd as needed. If the + * session has dropped, the OSD will have dropped the session state, + * and we'll get notified by the messaging layer. If that happens, we + * need to resubmit all requests for that osd. + */ +static void osd_reset(struct ceph_connection *con) +{ + struct ceph_osd *osd = con->private; + struct ceph_osd_client *osdc; + + if (!osd) + return; + dout("osd_reset osd%d\n", osd->o_osd); + osdc = osd->o_osdc; + osd->o_incarnation++; + down_read(&osdc->map_sem); + kick_requests(osdc, osd); + up_read(&osdc->map_sem); +} + +/* + * Track open sessions with osds. 
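+ *
+ * Each ceph_osd is refcounted (o_ref) and embeds a single connection
+ * to its daemon; it is removed from the osds rbtree and its connection
+ * closed once its request list is empty.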
+ */ +static struct ceph_osd *create_osd(struct ceph_osd_client *osdc) +{ + struct ceph_osd *osd; + + osd = kzalloc(sizeof(*osd), GFP_NOFS); + if (!osd) + return NULL; + + atomic_set(&osd->o_ref, 1); + osd->o_osdc = osdc; + INIT_LIST_HEAD(&osd->o_requests); + osd->o_incarnation = 1; + + ceph_con_init(osdc->client->msgr, &osd->o_con); + osd->o_con.private = osd; + osd->o_con.ops = &osd_con_ops; + osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD; + return osd; +} + +static struct ceph_osd *get_osd(struct ceph_osd *osd) +{ + if (atomic_inc_not_zero(&osd->o_ref)) { + dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1, + atomic_read(&osd->o_ref)); + return osd; + } else { + dout("get_osd %p FAIL\n", osd); + return NULL; + } +} + +static void put_osd(struct ceph_osd *osd) +{ + dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), + atomic_read(&osd->o_ref) - 1); + if (atomic_dec_and_test(&osd->o_ref)) { + ceph_con_shutdown(&osd->o_con); + kfree(osd); + } +} + +/* + * remove an osd from our map + */ +static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) +{ + dout("remove_osd %p\n", osd); + BUG_ON(!list_empty(&osd->o_requests)); + rb_erase(&osd->o_node, &osdc->osds); + ceph_con_close(&osd->o_con); + put_osd(osd); +} + +/* + * reset osd connect + */ +static int reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) +{ + int ret = 0; + + dout("reset_osd %p osd%d\n", osd, osd->o_osd); + if (list_empty(&osd->o_requests)) { + remove_osd(osdc, osd); + } else { + ceph_con_close(&osd->o_con); + ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]); + osd->o_incarnation++; + } + return ret; +} + +static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new) +{ + struct rb_node **p = &osdc->osds.rb_node; + struct rb_node *parent = NULL; + struct ceph_osd *osd = NULL; + + while (*p) { + parent = *p; + osd = rb_entry(parent, struct ceph_osd, o_node); + if (new->o_osd < osd->o_osd) + p = &(*p)->rb_left; + else if (new->o_osd > osd->o_osd) + p = &(*p)->rb_right; + else + BUG(); + } + + rb_link_node(&new->o_node, parent, p); + rb_insert_color(&new->o_node, &osdc->osds); +} + +static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o) +{ + struct ceph_osd *osd; + struct rb_node *n = osdc->osds.rb_node; + + while (n) { + osd = rb_entry(n, struct ceph_osd, o_node); + if (o < osd->o_osd) + n = n->rb_left; + else if (o > osd->o_osd) + n = n->rb_right; + else + return osd; + } + return NULL; +} + + +/* + * Register request, assign tid. If this is the first request, set up + * the timeout event. 
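+ *
+ * The timeout is armed relative to r_timeout_stamp (the osd_timeout
+ * mount option, in seconds); timeout_tid remembers which request the
+ * timer was scheduled for.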
+ */ +static void register_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req) +{ + struct ceph_osd_request_head *head = req->r_request->front.iov_base; + + mutex_lock(&osdc->request_mutex); + req->r_tid = ++osdc->last_tid; + head->tid = cpu_to_le64(req->r_tid); + + dout("register_request %p tid %lld\n", req, req->r_tid); + __insert_request(osdc, req); + ceph_osdc_get_request(req); + osdc->num_requests++; + + req->r_timeout_stamp = + jiffies + osdc->client->mount_args.osd_timeout*HZ; + + if (osdc->num_requests == 1) { + osdc->timeout_tid = req->r_tid; + dout(" timeout on tid %llu at %lu\n", req->r_tid, + req->r_timeout_stamp); + schedule_delayed_work(&osdc->timeout_work, + round_jiffies_relative(req->r_timeout_stamp - jiffies)); + } + mutex_unlock(&osdc->request_mutex); +} + +/* + * called under osdc->request_mutex + */ +static void __unregister_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req) +{ + dout("__unregister_request %p tid %lld\n", req, req->r_tid); + rb_erase(&req->r_node, &osdc->requests); + osdc->num_requests--; + + list_del_init(&req->r_osd_item); + if (list_empty(&req->r_osd->o_requests)) + remove_osd(osdc, req->r_osd); + req->r_osd = NULL; + + ceph_osdc_put_request(req); + + if (req->r_tid == osdc->timeout_tid) { + if (osdc->num_requests == 0) { + dout("no requests, canceling timeout\n"); + osdc->timeout_tid = 0; + cancel_delayed_work(&osdc->timeout_work); + } else { + req = rb_entry(rb_first(&osdc->requests), + struct ceph_osd_request, r_node); + osdc->timeout_tid = req->r_tid; + dout("rescheduled timeout on tid %llu at %lu\n", + req->r_tid, req->r_timeout_stamp); + schedule_delayed_work(&osdc->timeout_work, + round_jiffies_relative(req->r_timeout_stamp - + jiffies)); + } + } +} + +/* + * Cancel a previously queued request message + */ +static void __cancel_request(struct ceph_osd_request *req) +{ + if (req->r_sent) { + ceph_con_revoke(&req->r_osd->o_con, req->r_request); + req->r_sent = 0; + } +} + +/* + * Pick an osd (the first 'up' osd in the pg), allocate the osd struct + * (as needed), and set the request r_osd appropriately. If there is + * no up osd, set r_osd to NULL. + * + * Return 0 if unchanged, 1 if changed, or negative on error. + * + * Caller should hold map_sem for read and request_mutex. + */ +static int __map_osds(struct ceph_osd_client *osdc, + struct ceph_osd_request *req) +{ + struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base; + union ceph_pg pgid; + int o = -1; + int err; + struct ceph_osd *newosd = NULL; + + dout("map_osds %p tid %lld\n", req, req->r_tid); + err = ceph_calc_object_layout(&reqhead->layout, req->r_oid, + &req->r_file_layout, osdc->osdmap); + if (err) + return err; + pgid.pg64 = le64_to_cpu(reqhead->layout.ol_pgid); + o = ceph_calc_pg_primary(osdc->osdmap, pgid); + + if ((req->r_osd && req->r_osd->o_osd == o && + req->r_sent >= req->r_osd->o_incarnation) || + (req->r_osd == NULL && o == -1)) + return 0; /* no change */ + + dout("map_osds tid %llu pgid %llx pool %d osd%d (was osd%d)\n", + req->r_tid, pgid.pg64, pgid.pg.pool, o, + req->r_osd ? 
req->r_osd->o_osd : -1); + + if (req->r_osd) { + __cancel_request(req); + list_del_init(&req->r_osd_item); + if (list_empty(&req->r_osd->o_requests)) { + /* try to re-use r_osd if possible */ + newosd = get_osd(req->r_osd); + remove_osd(osdc, newosd); + } + req->r_osd = NULL; + } + + req->r_osd = __lookup_osd(osdc, o); + if (!req->r_osd && o >= 0) { + if (newosd) { + req->r_osd = newosd; + newosd = NULL; + } else { + err = -ENOMEM; + req->r_osd = create_osd(osdc); + if (!req->r_osd) + goto out; + } + + dout("map_osds osd %p is osd%d\n", req->r_osd, o); + req->r_osd->o_osd = o; + req->r_osd->o_con.peer_name.num = cpu_to_le64(o); + __insert_osd(osdc, req->r_osd); + + ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]); + } + + if (req->r_osd) + list_add(&req->r_osd_item, &req->r_osd->o_requests); + err = 1; /* osd changed */ + +out: + if (newosd) + put_osd(newosd); + return err; +} + +/* + * caller should hold map_sem (for read) and request_mutex + */ +static int __send_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req) +{ + struct ceph_osd_request_head *reqhead; + int err; + + err = __map_osds(osdc, req); + if (err < 0) + return err; + if (req->r_osd == NULL) { + dout("send_request %p no up osds in pg\n", req); + ceph_monc_request_next_osdmap(&osdc->client->monc); + return 0; + } + + dout("send_request %p tid %llu to osd%d flags %d\n", + req, req->r_tid, req->r_osd->o_osd, req->r_flags); + + reqhead = req->r_request->front.iov_base; + reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch); + reqhead->flags |= cpu_to_le32(req->r_flags); /* e.g., RETRY */ + reqhead->reassert_version = req->r_reassert_version; + + req->r_timeout_stamp = jiffies+osdc->client->mount_args.osd_timeout*HZ; + + ceph_msg_get(req->r_request); /* send consumes a ref */ + ceph_con_send(&req->r_osd->o_con, req->r_request); + req->r_sent = req->r_osd->o_incarnation; + return 0; +} + +/* + * Timeout callback, called every N seconds when 1 or more osd + * requests has been active for more than N seconds. When this + * happens, we ping all OSDs with requests who have timed out to + * ensure any communications channel reset is detected. Reset the + * request timeouts another N seconds in the future as we go. + * Reschedule the timeout event another N seconds in future (unless + * there are no open requests). 
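+ *
+ * Requests whose previous send attempt failed (r_resend) are also
+ * retried from here.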
+ */ +static void handle_timeout(struct work_struct *work) +{ + struct ceph_osd_client *osdc = + container_of(work, struct ceph_osd_client, timeout_work.work); + struct ceph_osd_request *req; + struct ceph_osd *osd; + unsigned long timeout = osdc->client->mount_args.osd_timeout * HZ; + unsigned long next_timeout = timeout + jiffies; + struct rb_node *p; + + dout("timeout\n"); + down_read(&osdc->map_sem); + + ceph_monc_request_next_osdmap(&osdc->client->monc); + + mutex_lock(&osdc->request_mutex); + for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { + req = rb_entry(p, struct ceph_osd_request, r_node); + + if (req->r_resend) { + int err; + + dout("osdc resending prev failed %lld\n", req->r_tid); + err = __send_request(osdc, req); + if (err) + dout("osdc failed again on %lld\n", req->r_tid); + else + req->r_resend = false; + continue; + } + } + for (p = rb_first(&osdc->osds); p; p = rb_next(p)) { + osd = rb_entry(p, struct ceph_osd, o_node); + if (list_empty(&osd->o_requests)) + continue; + req = list_first_entry(&osd->o_requests, + struct ceph_osd_request, r_osd_item); + if (time_before(jiffies, req->r_timeout_stamp)) + continue; + + dout(" tid %llu (at least) timed out on osd%d\n", + req->r_tid, osd->o_osd); + req->r_timeout_stamp = next_timeout; + ceph_con_keepalive(&osd->o_con); + } + + if (osdc->timeout_tid) + schedule_delayed_work(&osdc->timeout_work, + round_jiffies_relative(timeout)); + + mutex_unlock(&osdc->request_mutex); + + up_read(&osdc->map_sem); +} + +/* + * handle osd op reply. either call the callback if it is specified, + * or do the completion to wake up the waiting thread. + */ +static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg) +{ + struct ceph_osd_reply_head *rhead = msg->front.iov_base; + struct ceph_osd_request *req; + u64 tid; + int numops, object_len, flags; + + if (msg->front.iov_len < sizeof(*rhead)) + goto bad; + tid = le64_to_cpu(rhead->tid); + numops = le32_to_cpu(rhead->num_ops); + object_len = le32_to_cpu(rhead->object_len); + if (msg->front.iov_len != sizeof(*rhead) + object_len + + numops * sizeof(struct ceph_osd_op)) + goto bad; + dout("handle_reply %p tid %llu\n", msg, tid); + + /* lookup */ + mutex_lock(&osdc->request_mutex); + req = __lookup_request(osdc, tid); + if (req == NULL) { + dout("handle_reply tid %llu dne\n", tid); + mutex_unlock(&osdc->request_mutex); + return; + } + ceph_osdc_get_request(req); + flags = le32_to_cpu(rhead->flags); + + if (req->r_reply) { + /* + * once we see the message has been received, we don't + * need a ref (which is only needed for revoking + * pages) + */ + ceph_msg_put(req->r_reply); + req->r_reply = NULL; + } + + if (!req->r_got_reply) { + unsigned bytes; + + req->r_result = le32_to_cpu(rhead->result); + bytes = le32_to_cpu(msg->hdr.data_len); + dout("handle_reply result %d bytes %d\n", req->r_result, + bytes); + if (req->r_result == 0) + req->r_result = bytes; + + /* in case this is a write and we need to replay, */ + req->r_reassert_version = rhead->reassert_version; + + req->r_got_reply = 1; + } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) { + dout("handle_reply tid %llu dup ack\n", tid); + goto done; + } + + dout("handle_reply tid %llu flags %d\n", tid, flags); + + /* either this is a read, or we got the safe response */ + if ((flags & CEPH_OSD_FLAG_ONDISK) || + ((flags & CEPH_OSD_FLAG_WRITE) == 0)) + __unregister_request(osdc, req); + + mutex_unlock(&osdc->request_mutex); + + if (req->r_callback) + req->r_callback(req, msg); + else + complete(&req->r_completion); + + if (flags & 
CEPH_OSD_FLAG_ONDISK) { + if (req->r_safe_callback) + req->r_safe_callback(req, msg); + complete(&req->r_safe_completion); /* fsync waiter */ + } + +done: + ceph_osdc_put_request(req); + return; + +bad: + pr_err("corrupt osd_op_reply got %d %d expected %d\n", + (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len), + (int)sizeof(*rhead)); +} + + +/* + * Resubmit osd requests whose osd or osd address has changed. Request + * a new osd map if osds are down, or we are otherwise unable to determine + * how to direct a request. + * + * Close connections to down osds. + * + * If @who is specified, resubmit requests for that specific osd. + * + * Caller should hold map_sem for read and request_mutex. + */ +static void kick_requests(struct ceph_osd_client *osdc, + struct ceph_osd *kickosd) +{ + struct ceph_osd_request *req; + struct rb_node *p, *n; + int needmap = 0; + int err; + + dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1); + mutex_lock(&osdc->request_mutex); + if (!kickosd) { + for (p = rb_first(&osdc->osds); p; p = n) { + struct ceph_osd *osd = + rb_entry(p, struct ceph_osd, o_node); + + n = rb_next(p); + if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) || + !ceph_entity_addr_equal(&osd->o_con.peer_addr, + ceph_osd_addr(osdc->osdmap, + osd->o_osd))) + reset_osd(osdc, osd); + } + } + + for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { + req = rb_entry(p, struct ceph_osd_request, r_node); + + if (req->r_resend) { + dout(" r_resend set on tid %llu\n", req->r_tid); + goto kick; + } + if (req->r_osd && kickosd == req->r_osd) + goto kick; + + err = __map_osds(osdc, req); + if (err == 0) + continue; /* no change */ + if (err < 0) { + /* + * FIXME: really, we should set the request + * error and fail if this isn't a 'nofail' + * request, but that's a fair bit more + * complicated to do. So retry! + */ + dout(" setting r_resend on %llu\n", req->r_tid); + req->r_resend = true; + continue; + } + if (req->r_osd == NULL) { + dout("tid %llu maps to no valid osd\n", req->r_tid); + needmap++; /* request a newer map */ + continue; + } + +kick: + dout("kicking tid %llu osd%d\n", req->r_tid, req->r_osd->o_osd); + req->r_flags |= CEPH_OSD_FLAG_RETRY; + err = __send_request(osdc, req); + if (err) { + dout(" setting r_resend on %llu\n", req->r_tid); + req->r_resend = true; + } + } + mutex_unlock(&osdc->request_mutex); + + if (needmap) { + dout("%d requests for down osds, need new map\n", needmap); + ceph_monc_request_next_osdmap(&osdc->client->monc); + } +} + +/* + * Process updated osd map. + * + * The message contains any number of incremental and full maps, normally + * indicating some sort of topology change in the cluster. Kick requests + * off to different OSDs as needed. + */ +void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) +{ + void *p, *end, *next; + u32 nr_maps, maplen; + u32 epoch; + struct ceph_osdmap *newmap = NULL, *oldmap; + int err; + struct ceph_fsid fsid; + + dout("handle_map have %u\n", osdc->osdmap ? 
osdc->osdmap->epoch : 0); + p = msg->front.iov_base; + end = p + msg->front.iov_len; + + /* verify fsid */ + ceph_decode_need(&p, end, sizeof(fsid), bad); + ceph_decode_copy(&p, &fsid, sizeof(fsid)); + if (ceph_fsid_compare(&fsid, &osdc->client->monc.monmap->fsid)) { + pr_err("got osdmap with wrong fsid, ignoring\n"); + return; + } + + down_write(&osdc->map_sem); + + /* incremental maps */ + ceph_decode_32_safe(&p, end, nr_maps, bad); + dout(" %d inc maps\n", nr_maps); + while (nr_maps > 0) { + ceph_decode_need(&p, end, 2*sizeof(u32), bad); + ceph_decode_32(&p, epoch); + ceph_decode_32(&p, maplen); + ceph_decode_need(&p, end, maplen, bad); + next = p + maplen; + if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) { + dout("applying incremental map %u len %d\n", + epoch, maplen); + newmap = osdmap_apply_incremental(&p, next, + osdc->osdmap, + osdc->client->msgr); + if (IS_ERR(newmap)) { + err = PTR_ERR(newmap); + goto bad; + } + if (newmap != osdc->osdmap) { + ceph_osdmap_destroy(osdc->osdmap); + osdc->osdmap = newmap; + } + } else { + dout("ignoring incremental map %u len %d\n", + epoch, maplen); + } + p = next; + nr_maps--; + } + if (newmap) + goto done; + + /* full maps */ + ceph_decode_32_safe(&p, end, nr_maps, bad); + dout(" %d full maps\n", nr_maps); + while (nr_maps) { + ceph_decode_need(&p, end, 2*sizeof(u32), bad); + ceph_decode_32(&p, epoch); + ceph_decode_32(&p, maplen); + ceph_decode_need(&p, end, maplen, bad); + if (nr_maps > 1) { + dout("skipping non-latest full map %u len %d\n", + epoch, maplen); + } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) { + dout("skipping full map %u len %d, " + "older than our %u\n", epoch, maplen, + osdc->osdmap->epoch); + } else { + dout("taking full map %u len %d\n", epoch, maplen); + newmap = osdmap_decode(&p, p+maplen); + if (IS_ERR(newmap)) { + err = PTR_ERR(newmap); + goto bad; + } + oldmap = osdc->osdmap; + osdc->osdmap = newmap; + if (oldmap) + ceph_osdmap_destroy(oldmap); + } + p += maplen; + nr_maps--; + } + +done: + downgrade_write(&osdc->map_sem); + ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch); + if (newmap) + kick_requests(osdc, NULL); + up_read(&osdc->map_sem); + return; + +bad: + pr_err("osdc handle_map corrupt msg\n"); + up_write(&osdc->map_sem); + return; +} + + +/* + * A read request prepares specific pages that data is to be read into. + * When a message is being read off the wire, we call prepare_pages to + * find those pages. + * 0 = success, -1 failure. + */ +static int prepare_pages(struct ceph_connection *con, struct ceph_msg *m, + int want) +{ + struct ceph_osd *osd = con->private; + struct ceph_osd_client *osdc; + struct ceph_osd_reply_head *rhead = m->front.iov_base; + struct ceph_osd_request *req; + u64 tid; + int ret = -1; + int type = le16_to_cpu(m->hdr.type); + + if (!osd) + return -1; + osdc = osd->o_osdc; + + dout("prepare_pages on msg %p want %d\n", m, want); + if (unlikely(type != CEPH_MSG_OSD_OPREPLY)) + return -1; /* hmm! 
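+ * (only OSD op replies are expected to carry a data payload
+ * that needs pages prepared here)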
*/ + + tid = le64_to_cpu(rhead->tid); + mutex_lock(&osdc->request_mutex); + req = __lookup_request(osdc, tid); + if (!req) { + dout("prepare_pages unknown tid %llu\n", tid); + goto out; + } + dout("prepare_pages tid %llu has %d pages, want %d\n", + tid, req->r_num_pages, want); + if (likely(req->r_num_pages >= want && !req->r_prepared_pages)) { + m->pages = req->r_pages; + m->nr_pages = req->r_num_pages; + req->r_reply = m; /* only for duration of read over socket */ + ceph_msg_get(m); + req->r_prepared_pages = 1; + ret = 0; /* success */ + } +out: + mutex_unlock(&osdc->request_mutex); + return ret; +} + +/* + * Register request, send initial attempt. + */ +int ceph_osdc_start_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req, + bool nofail) +{ + int rc; + + req->r_request->pages = req->r_pages; + req->r_request->nr_pages = req->r_num_pages; + + register_request(osdc, req); + + down_read(&osdc->map_sem); + mutex_lock(&osdc->request_mutex); + rc = __send_request(osdc, req); + if (rc) { + if (nofail) { + dout("osdc_start_request failed send, marking %lld\n", + req->r_tid); + req->r_resend = true; + rc = 0; + } else { + __unregister_request(osdc, req); + } + } + mutex_unlock(&osdc->request_mutex); + up_read(&osdc->map_sem); + return rc; +} + +/* + * wait for a request to complete + */ +int ceph_osdc_wait_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req) +{ + int rc; + + rc = wait_for_completion_interruptible(&req->r_completion); + if (rc < 0) { + mutex_lock(&osdc->request_mutex); + __cancel_request(req); + mutex_unlock(&osdc->request_mutex); + dout("wait_request tid %llu timed out\n", req->r_tid); + return rc; + } + + dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result); + return req->r_result; +} + +/* + * sync - wait for all in-flight requests to flush. avoid starvation. 
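+ *
+ * Starvation is avoided by sampling last_tid once on entry and only
+ * waiting (on r_safe_completion) for write requests with tids at or
+ * below that value, walking them with __lookup_request_ge().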
+ */ +void ceph_osdc_sync(struct ceph_osd_client *osdc) +{ + struct ceph_osd_request *req; + u64 last_tid, next_tid = 0; + + mutex_lock(&osdc->request_mutex); + last_tid = osdc->last_tid; + while (1) { + req = __lookup_request_ge(osdc, next_tid); + if (!req) + break; + if (req->r_tid > last_tid) + break; + + next_tid = req->r_tid + 1; + if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0) + continue; + + ceph_osdc_get_request(req); + mutex_unlock(&osdc->request_mutex); + dout("sync waiting on tid %llu (last is %llu)\n", + req->r_tid, last_tid); + wait_for_completion(&req->r_safe_completion); + mutex_lock(&osdc->request_mutex); + ceph_osdc_put_request(req); + } + mutex_unlock(&osdc->request_mutex); + dout("sync done (thru tid %llu)\n", last_tid); +} + +/* + * init, shutdown + */ +int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) +{ + int err; + + dout("init\n"); + osdc->client = client; + osdc->osdmap = NULL; + init_rwsem(&osdc->map_sem); + init_completion(&osdc->map_waiters); + osdc->last_requested_map = 0; + mutex_init(&osdc->request_mutex); + osdc->timeout_tid = 0; + osdc->last_tid = 0; + osdc->osds = RB_ROOT; + osdc->requests = RB_ROOT; + osdc->num_requests = 0; + INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout); + + osdc->req_mempool = mempool_create_kmalloc_pool(10, + sizeof(struct ceph_osd_request)); + if (!osdc->req_mempool) + return -ENOMEM; + + err = ceph_msgpool_init(&osdc->msgpool_op, 4096, 10, true); + if (err < 0) + return -ENOMEM; + err = ceph_msgpool_init(&osdc->msgpool_op_reply, 512, 0, false); + if (err < 0) + return -ENOMEM; + + return 0; +} + +void ceph_osdc_stop(struct ceph_osd_client *osdc) +{ + cancel_delayed_work_sync(&osdc->timeout_work); + if (osdc->osdmap) { + ceph_osdmap_destroy(osdc->osdmap); + osdc->osdmap = NULL; + } + mempool_destroy(osdc->req_mempool); + ceph_msgpool_destroy(&osdc->msgpool_op); + ceph_msgpool_destroy(&osdc->msgpool_op_reply); +} + +/* + * Read some contiguous pages. If we cross a stripe boundary, shorten + * *plen. Return number of bytes read, or error. 
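+ *
+ * This is a synchronous helper: it builds a CEPH_OSD_OP_READ request,
+ * starts it, waits for the reply, and returns r_result from the wait.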
+ */ +int ceph_osdc_readpages(struct ceph_osd_client *osdc, + struct ceph_vino vino, struct ceph_file_layout *layout, + u64 off, u64 *plen, + u32 truncate_seq, u64 truncate_size, + struct page **pages, int num_pages) +{ + struct ceph_osd_request *req; + int rc = 0; + + dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino, + vino.snap, off, *plen); + req = ceph_osdc_new_request(osdc, layout, vino, off, plen, + CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, + NULL, 0, truncate_seq, truncate_size, NULL, + false, 1); + if (IS_ERR(req)) + return PTR_ERR(req); + + /* it may be a short read due to an object boundary */ + req->r_pages = pages; + num_pages = calc_pages_for(off, *plen); + req->r_num_pages = num_pages; + + dout("readpages final extent is %llu~%llu (%d pages)\n", + off, *plen, req->r_num_pages); + + rc = ceph_osdc_start_request(osdc, req, false); + if (!rc) + rc = ceph_osdc_wait_request(osdc, req); + + ceph_osdc_put_request(req); + dout("readpages result %d\n", rc); + return rc; +} + +/* + * do a synchronous write on N pages + */ +int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, + struct ceph_file_layout *layout, + struct ceph_snap_context *snapc, + u64 off, u64 len, + u32 truncate_seq, u64 truncate_size, + struct timespec *mtime, + struct page **pages, int num_pages, + int flags, int do_sync, bool nofail) +{ + struct ceph_osd_request *req; + int rc = 0; + + BUG_ON(vino.snap != CEPH_NOSNAP); + req = ceph_osdc_new_request(osdc, layout, vino, off, &len, + CEPH_OSD_OP_WRITE, + flags | CEPH_OSD_FLAG_ONDISK | + CEPH_OSD_FLAG_WRITE, + snapc, do_sync, + truncate_seq, truncate_size, mtime, + nofail, 1); + if (IS_ERR(req)) + return PTR_ERR(req); + + /* it may be a short write due to an object boundary */ + req->r_pages = pages; + req->r_num_pages = calc_pages_for(off, len); + dout("writepages %llu~%llu (%d pages)\n", off, len, + req->r_num_pages); + + rc = ceph_osdc_start_request(osdc, req, nofail); + if (!rc) + rc = ceph_osdc_wait_request(osdc, req); + + ceph_osdc_put_request(req); + if (rc == 0) + rc = len; + dout("writepages result %d\n", rc); + return rc; +} + +/* + * handle incoming message + */ +static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) +{ + struct ceph_osd *osd = con->private; + struct ceph_osd_client *osdc = osd->o_osdc; + int type = le16_to_cpu(msg->hdr.type); + + if (!osd) + return; + + switch (type) { + case CEPH_MSG_OSD_MAP: + ceph_osdc_handle_map(osdc, msg); + break; + case CEPH_MSG_OSD_OPREPLY: + handle_reply(osdc, msg); + break; + + default: + pr_err("received unknown message type %d %s\n", type, + ceph_msg_type_name(type)); + } + ceph_msg_put(msg); +} + +static struct ceph_msg *alloc_msg(struct ceph_connection *con, + struct ceph_msg_header *hdr) +{ + struct ceph_osd *osd = con->private; + struct ceph_osd_client *osdc = osd->o_osdc; + int type = le16_to_cpu(hdr->type); + + switch (type) { + case CEPH_MSG_OSD_OPREPLY: + return ceph_msgpool_get(&osdc->msgpool_op_reply); + } + return ceph_alloc_msg(con, hdr); +} + +/* + * Wrappers to refcount containing ceph_osd struct + */ +static struct ceph_connection *get_osd_con(struct ceph_connection *con) +{ + struct ceph_osd *osd = con->private; + if (get_osd(osd)) + return con; + return NULL; +} + +static void put_osd_con(struct ceph_connection *con) +{ + struct ceph_osd *osd = con->private; + put_osd(osd); +} + +const static struct ceph_connection_operations osd_con_ops = { + .get = get_osd_con, + .put = put_osd_con, + .dispatch = dispatch, + .alloc_msg = alloc_msg, + .peer_reset = 
osd_reset, + .alloc_middle = ceph_alloc_middle, + .prepare_pages = prepare_pages, +}; diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h new file mode 100644 index 000000000000..9a4addf7d651 --- /dev/null +++ b/fs/ceph/osd_client.h @@ -0,0 +1,144 @@ +#ifndef _FS_CEPH_OSD_CLIENT_H +#define _FS_CEPH_OSD_CLIENT_H + +#include +#include +#include + +#include "types.h" +#include "osdmap.h" +#include "messenger.h" + +struct ceph_msg; +struct ceph_snap_context; +struct ceph_osd_request; +struct ceph_osd_client; + +/* + * completion callback for async writepages + */ +typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *, + struct ceph_msg *); + +/* a given osd we're communicating with */ +struct ceph_osd { + atomic_t o_ref; + struct ceph_osd_client *o_osdc; + int o_osd; + int o_incarnation; + struct rb_node o_node; + struct ceph_connection o_con; + struct list_head o_requests; +}; + +/* an in-flight request */ +struct ceph_osd_request { + u64 r_tid; /* unique for this client */ + struct rb_node r_node; + struct list_head r_osd_item; + struct ceph_osd *r_osd; + + struct ceph_msg *r_request, *r_reply; + int r_result; + int r_flags; /* any additional flags for the osd */ + u32 r_sent; /* >0 if r_request is sending/sent */ + int r_prepared_pages, r_got_reply; + + struct ceph_osd_client *r_osdc; + atomic_t r_ref; + bool r_mempool; + struct completion r_completion, r_safe_completion; + ceph_osdc_callback_t r_callback, r_safe_callback; + struct ceph_eversion r_reassert_version; + struct list_head r_unsafe_item; + + struct inode *r_inode; /* for use by callbacks */ + struct writeback_control *r_wbc; /* ditto */ + + char r_oid[40]; /* object name */ + int r_oid_len; + unsigned long r_timeout_stamp; + bool r_resend; /* msg send failed, needs retry */ + + struct ceph_file_layout r_file_layout; + struct ceph_snap_context *r_snapc; /* snap context for writes */ + unsigned r_num_pages; /* size of page array (follows) */ + struct page **r_pages; /* pages for data payload */ + int r_pages_from_pool; + int r_own_pages; /* if true, i own page list */ +}; + +struct ceph_osd_client { + struct ceph_client *client; + + struct ceph_osdmap *osdmap; /* current map */ + struct rw_semaphore map_sem; + struct completion map_waiters; + u64 last_requested_map; + + struct mutex request_mutex; + struct rb_root osds; /* osds */ + u64 timeout_tid; /* tid of timeout triggering rq */ + u64 last_tid; /* tid of last request */ + struct rb_root requests; /* pending requests */ + int num_requests; + struct delayed_work timeout_work; + struct dentry *debugfs_file; + + mempool_t *req_mempool; + + struct ceph_msgpool msgpool_op; + struct ceph_msgpool msgpool_op_reply; +}; + +extern int ceph_osdc_init(struct ceph_osd_client *osdc, + struct ceph_client *client); +extern void ceph_osdc_stop(struct ceph_osd_client *osdc); + +extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc, + struct ceph_msg *msg); +extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, + struct ceph_msg *msg); + +extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, + struct ceph_file_layout *layout, + struct ceph_vino vino, + u64 offset, u64 *len, int op, int flags, + struct ceph_snap_context *snapc, + int do_sync, u32 truncate_seq, + u64 truncate_size, + struct timespec *mtime, + bool use_mempool, int num_reply); + +static inline void ceph_osdc_get_request(struct ceph_osd_request *req) +{ + atomic_inc(&req->r_ref); +} +extern void ceph_osdc_put_request(struct ceph_osd_request *req); + +extern int 
ceph_osdc_start_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req, + bool nofail); +extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req); +extern void ceph_osdc_sync(struct ceph_osd_client *osdc); + +extern int ceph_osdc_readpages(struct ceph_osd_client *osdc, + struct ceph_vino vino, + struct ceph_file_layout *layout, + u64 off, u64 *plen, + u32 truncate_seq, u64 truncate_size, + struct page **pages, int nr_pages); + +extern int ceph_osdc_writepages(struct ceph_osd_client *osdc, + struct ceph_vino vino, + struct ceph_file_layout *layout, + struct ceph_snap_context *sc, + u64 off, u64 len, + u32 truncate_seq, u64 truncate_size, + struct timespec *mtime, + struct page **pages, int nr_pages, + int flags, int do_sync, bool nofail); + +#endif + diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c new file mode 100644 index 000000000000..e38fe6309b1c --- /dev/null +++ b/fs/ceph/osdmap.c @@ -0,0 +1,875 @@ + +#include + +#include "super.h" +#include "osdmap.h" +#include "crush/hash.h" +#include "crush/mapper.h" +#include "decode.h" +#include "ceph_debug.h" + +char *ceph_osdmap_state_str(char *str, int len, int state) +{ + int flag = 0; + + if (!len) + goto done; + + *str = '\0'; + if (state) { + if (state & CEPH_OSD_EXISTS) { + snprintf(str, len, "exists"); + flag = 1; + } + if (state & CEPH_OSD_UP) { + snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""), + "up"); + flag = 1; + } + } else { + snprintf(str, len, "doesn't exist"); + } +done: + return str; +} + +/* maps */ + +static int calc_bits_of(unsigned t) +{ + int b = 0; + while (t) { + t = t >> 1; + b++; + } + return b; +} + +/* + * the foo_mask is the smallest value 2^n-1 that is >= foo. + */ +static void calc_pg_masks(struct ceph_pg_pool_info *pi) +{ + pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1; + pi->pgp_num_mask = + (1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1; + pi->lpg_num_mask = + (1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1; + pi->lpgp_num_mask = + (1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1; +} + +/* + * decode crush map + */ +static int crush_decode_uniform_bucket(void **p, void *end, + struct crush_bucket_uniform *b) +{ + dout("crush_decode_uniform_bucket %p to %p\n", *p, end); + ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad); + ceph_decode_32(p, b->item_weight); + return 0; +bad: + return -EINVAL; +} + +static int crush_decode_list_bucket(void **p, void *end, + struct crush_bucket_list *b) +{ + int j; + dout("crush_decode_list_bucket %p to %p\n", *p, end); + b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); + if (b->item_weights == NULL) + return -ENOMEM; + b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); + if (b->sum_weights == NULL) + return -ENOMEM; + ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); + for (j = 0; j < b->h.size; j++) { + ceph_decode_32(p, b->item_weights[j]); + ceph_decode_32(p, b->sum_weights[j]); + } + return 0; +bad: + return -EINVAL; +} + +static int crush_decode_tree_bucket(void **p, void *end, + struct crush_bucket_tree *b) +{ + int j; + dout("crush_decode_tree_bucket %p to %p\n", *p, end); + ceph_decode_32_safe(p, end, b->num_nodes, bad); + b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS); + if (b->node_weights == NULL) + return -ENOMEM; + ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad); + for (j = 0; j < b->num_nodes; j++) + ceph_decode_32(p, b->node_weights[j]); + return 0; +bad: + return -EINVAL; +} + +static int 
crush_decode_straw_bucket(void **p, void *end, + struct crush_bucket_straw *b) +{ + int j; + dout("crush_decode_straw_bucket %p to %p\n", *p, end); + b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); + if (b->item_weights == NULL) + return -ENOMEM; + b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS); + if (b->straws == NULL) + return -ENOMEM; + ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); + for (j = 0; j < b->h.size; j++) { + ceph_decode_32(p, b->item_weights[j]); + ceph_decode_32(p, b->straws[j]); + } + return 0; +bad: + return -EINVAL; +} + +static struct crush_map *crush_decode(void *pbyval, void *end) +{ + struct crush_map *c; + int err = -EINVAL; + int i, j; + void **p = &pbyval; + void *start = pbyval; + u32 magic; + + dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p)); + + c = kzalloc(sizeof(*c), GFP_NOFS); + if (c == NULL) + return ERR_PTR(-ENOMEM); + + ceph_decode_need(p, end, 4*sizeof(u32), bad); + ceph_decode_32(p, magic); + if (magic != CRUSH_MAGIC) { + pr_err("crush_decode magic %x != current %x\n", + (unsigned)magic, (unsigned)CRUSH_MAGIC); + goto bad; + } + ceph_decode_32(p, c->max_buckets); + ceph_decode_32(p, c->max_rules); + ceph_decode_32(p, c->max_devices); + + c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS); + if (c->device_parents == NULL) + goto badmem; + c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS); + if (c->bucket_parents == NULL) + goto badmem; + + c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS); + if (c->buckets == NULL) + goto badmem; + c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS); + if (c->rules == NULL) + goto badmem; + + /* buckets */ + for (i = 0; i < c->max_buckets; i++) { + int size = 0; + u32 alg; + struct crush_bucket *b; + + ceph_decode_32_safe(p, end, alg, bad); + if (alg == 0) { + c->buckets[i] = NULL; + continue; + } + dout("crush_decode bucket %d off %x %p to %p\n", + i, (int)(*p-start), *p, end); + + switch (alg) { + case CRUSH_BUCKET_UNIFORM: + size = sizeof(struct crush_bucket_uniform); + break; + case CRUSH_BUCKET_LIST: + size = sizeof(struct crush_bucket_list); + break; + case CRUSH_BUCKET_TREE: + size = sizeof(struct crush_bucket_tree); + break; + case CRUSH_BUCKET_STRAW: + size = sizeof(struct crush_bucket_straw); + break; + default: + goto bad; + } + BUG_ON(size == 0); + b = c->buckets[i] = kzalloc(size, GFP_NOFS); + if (b == NULL) + goto badmem; + + ceph_decode_need(p, end, 4*sizeof(u32), bad); + ceph_decode_32(p, b->id); + ceph_decode_16(p, b->type); + ceph_decode_16(p, b->alg); + ceph_decode_32(p, b->weight); + ceph_decode_32(p, b->size); + + dout("crush_decode bucket size %d off %x %p to %p\n", + b->size, (int)(*p-start), *p, end); + + b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS); + if (b->items == NULL) + goto badmem; + b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS); + if (b->perm == NULL) + goto badmem; + b->perm_n = 0; + + ceph_decode_need(p, end, b->size*sizeof(u32), bad); + for (j = 0; j < b->size; j++) + ceph_decode_32(p, b->items[j]); + + switch (b->alg) { + case CRUSH_BUCKET_UNIFORM: + err = crush_decode_uniform_bucket(p, end, + (struct crush_bucket_uniform *)b); + if (err < 0) + goto bad; + break; + case CRUSH_BUCKET_LIST: + err = crush_decode_list_bucket(p, end, + (struct crush_bucket_list *)b); + if (err < 0) + goto bad; + break; + case CRUSH_BUCKET_TREE: + err = crush_decode_tree_bucket(p, end, + (struct crush_bucket_tree *)b); + if (err < 0) + goto bad; + break; + case CRUSH_BUCKET_STRAW: + err 
= crush_decode_straw_bucket(p, end, + (struct crush_bucket_straw *)b); + if (err < 0) + goto bad; + break; + } + } + + /* rules */ + dout("rule vec is %p\n", c->rules); + for (i = 0; i < c->max_rules; i++) { + u32 yes; + struct crush_rule *r; + + ceph_decode_32_safe(p, end, yes, bad); + if (!yes) { + dout("crush_decode NO rule %d off %x %p to %p\n", + i, (int)(*p-start), *p, end); + c->rules[i] = NULL; + continue; + } + + dout("crush_decode rule %d off %x %p to %p\n", + i, (int)(*p-start), *p, end); + + /* len */ + ceph_decode_32_safe(p, end, yes, bad); +#if BITS_PER_LONG == 32 + if (yes > ULONG_MAX / sizeof(struct crush_rule_step)) + goto bad; +#endif + r = c->rules[i] = kmalloc(sizeof(*r) + + yes*sizeof(struct crush_rule_step), + GFP_NOFS); + if (r == NULL) + goto badmem; + dout(" rule %d is at %p\n", i, r); + r->len = yes; + ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */ + ceph_decode_need(p, end, r->len*3*sizeof(u32), bad); + for (j = 0; j < r->len; j++) { + ceph_decode_32(p, r->steps[j].op); + ceph_decode_32(p, r->steps[j].arg1); + ceph_decode_32(p, r->steps[j].arg2); + } + } + + /* ignore trailing name maps. */ + + dout("crush_decode success\n"); + return c; + +badmem: + err = -ENOMEM; +bad: + dout("crush_decode fail %d\n", err); + crush_destroy(c); + return ERR_PTR(err); +} + + +/* + * osd map + */ +void ceph_osdmap_destroy(struct ceph_osdmap *map) +{ + dout("osdmap_destroy %p\n", map); + if (map->crush) + crush_destroy(map->crush); + while (!RB_EMPTY_ROOT(&map->pg_temp)) + rb_erase(rb_first(&map->pg_temp), &map->pg_temp); + kfree(map->osd_state); + kfree(map->osd_weight); + kfree(map->pg_pool); + kfree(map->osd_addr); + kfree(map); +} + +/* + * adjust max osd value. reallocate arrays. + */ +static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) +{ + u8 *state; + struct ceph_entity_addr *addr; + u32 *weight; + + state = kcalloc(max, sizeof(*state), GFP_NOFS); + addr = kcalloc(max, sizeof(*addr), GFP_NOFS); + weight = kcalloc(max, sizeof(*weight), GFP_NOFS); + if (state == NULL || addr == NULL || weight == NULL) { + kfree(state); + kfree(addr); + kfree(weight); + return -ENOMEM; + } + + /* copy old? */ + if (map->osd_state) { + memcpy(state, map->osd_state, map->max_osd*sizeof(*state)); + memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr)); + memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight)); + kfree(map->osd_state); + kfree(map->osd_addr); + kfree(map->osd_weight); + } + + map->osd_state = state; + map->osd_weight = weight; + map->osd_addr = addr; + map->max_osd = max; + return 0; +} + +/* + * Insert a new pg_temp mapping + */ +static void __insert_pg_mapping(struct ceph_pg_mapping *new, + struct rb_root *root) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct ceph_pg_mapping *pg = NULL; + + while (*p) { + parent = *p; + pg = rb_entry(parent, struct ceph_pg_mapping, node); + if (new->pgid < pg->pgid) + p = &(*p)->rb_left; + else if (new->pgid > pg->pgid) + p = &(*p)->rb_right; + else + BUG(); + } + + rb_link_node(&new->node, parent, p); + rb_insert_color(&new->node, root); +} + +/* + * decode a full map. 
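+ *
+ * Fields are parsed in encoding order: version, fsid, epoch,
+ * created/modified stamps, pg pools, flags, max_osd (with the per-osd
+ * state, weight and address arrays), pg_temp mappings, and finally the
+ * crush map.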
+ */ +struct ceph_osdmap *osdmap_decode(void **p, void *end) +{ + struct ceph_osdmap *map; + u16 version; + u32 len, max, i; + int err = -EINVAL; + void *start = *p; + + dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p)); + + map = kzalloc(sizeof(*map), GFP_NOFS); + if (map == NULL) + return ERR_PTR(-ENOMEM); + map->pg_temp = RB_ROOT; + + ceph_decode_16_safe(p, end, version, bad); + + ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad); + ceph_decode_copy(p, &map->fsid, sizeof(map->fsid)); + ceph_decode_32(p, map->epoch); + ceph_decode_copy(p, &map->created, sizeof(map->created)); + ceph_decode_copy(p, &map->modified, sizeof(map->modified)); + + ceph_decode_32(p, map->num_pools); + map->pg_pool = kcalloc(map->num_pools, sizeof(*map->pg_pool), + GFP_NOFS); + if (!map->pg_pool) { + err = -ENOMEM; + goto bad; + } + ceph_decode_32_safe(p, end, max, bad); + while (max--) { + ceph_decode_need(p, end, 4+sizeof(map->pg_pool->v), bad); + ceph_decode_32(p, i); + if (i >= map->num_pools) + goto bad; + ceph_decode_copy(p, &map->pg_pool[i].v, + sizeof(map->pg_pool->v)); + calc_pg_masks(&map->pg_pool[i]); + p += le32_to_cpu(map->pg_pool[i].v.num_snaps) * sizeof(u64); + p += le32_to_cpu(map->pg_pool[i].v.num_removed_snap_intervals) + * sizeof(u64) * 2; + } + + ceph_decode_32_safe(p, end, map->flags, bad); + + ceph_decode_32(p, max); + + /* (re)alloc osd arrays */ + err = osdmap_set_max_osd(map, max); + if (err < 0) + goto bad; + dout("osdmap_decode max_osd = %d\n", map->max_osd); + + /* osds */ + err = -EINVAL; + ceph_decode_need(p, end, 3*sizeof(u32) + + map->max_osd*(1 + sizeof(*map->osd_weight) + + sizeof(*map->osd_addr)), bad); + *p += 4; /* skip length field (should match max) */ + ceph_decode_copy(p, map->osd_state, map->max_osd); + + *p += 4; /* skip length field (should match max) */ + for (i = 0; i < map->max_osd; i++) + ceph_decode_32(p, map->osd_weight[i]); + + *p += 4; /* skip length field (should match max) */ + ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr)); + + /* pg_temp */ + ceph_decode_32_safe(p, end, len, bad); + for (i = 0; i < len; i++) { + int n, j; + u64 pgid; + struct ceph_pg_mapping *pg; + + ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad); + ceph_decode_64(p, pgid); + ceph_decode_32(p, n); + ceph_decode_need(p, end, n * sizeof(u32), bad); + pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS); + if (!pg) { + err = -ENOMEM; + goto bad; + } + pg->pgid = pgid; + pg->len = n; + for (j = 0; j < n; j++) + ceph_decode_32(p, pg->osds[j]); + + __insert_pg_mapping(pg, &map->pg_temp); + dout(" added pg_temp %llx len %d\n", pgid, len); + } + + /* crush */ + ceph_decode_32_safe(p, end, len, bad); + dout("osdmap_decode crush len %d from off 0x%x\n", len, + (int)(*p - start)); + ceph_decode_need(p, end, len, bad); + map->crush = crush_decode(*p, end); + *p += len; + if (IS_ERR(map->crush)) { + err = PTR_ERR(map->crush); + map->crush = NULL; + goto bad; + } + + /* ignore the rest of the map */ + *p = end; + + dout("osdmap_decode done %p %p\n", *p, end); + return map; + +bad: + dout("osdmap_decode fail\n"); + ceph_osdmap_destroy(map); + return ERR_PTR(err); +} + +/* + * decode and apply an incremental map update. 
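 *
 * A hedged sketch of the caller pattern this is written for (the "osdc"
 * osd-client state is an assumption here, not something defined in this
 * file): the function normally updates and returns the map it was given,
 * but returns a brand new map if the incremental turns out to embed a
 * full map, so the caller compares pointers:
 *
 *	newmap = osdmap_apply_incremental(&p, end, osdc->osdmap, msgr);
 *	if (IS_ERR(newmap))
 *		goto bad;
 *	if (newmap != osdc->osdmap) {
 *		ceph_osdmap_destroy(osdc->osdmap);
 *		osdc->osdmap = newmap;
 *	}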
+ */ +struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, + struct ceph_osdmap *map, + struct ceph_messenger *msgr) +{ + struct ceph_osdmap *newmap = map; + struct crush_map *newcrush = NULL; + struct ceph_fsid fsid; + u32 epoch = 0; + struct ceph_timespec modified; + u32 len, pool; + __s32 new_flags, max; + void *start = *p; + int err = -EINVAL; + u16 version; + struct rb_node *rbp; + + ceph_decode_16_safe(p, end, version, bad); + + ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32), + bad); + ceph_decode_copy(p, &fsid, sizeof(fsid)); + ceph_decode_32(p, epoch); + BUG_ON(epoch != map->epoch+1); + ceph_decode_copy(p, &modified, sizeof(modified)); + ceph_decode_32(p, new_flags); + + /* full map? */ + ceph_decode_32_safe(p, end, len, bad); + if (len > 0) { + dout("apply_incremental full map len %d, %p to %p\n", + len, *p, end); + newmap = osdmap_decode(p, min(*p+len, end)); + return newmap; /* error or not */ + } + + /* new crush? */ + ceph_decode_32_safe(p, end, len, bad); + if (len > 0) { + dout("apply_incremental new crush map len %d, %p to %p\n", + len, *p, end); + newcrush = crush_decode(*p, min(*p+len, end)); + if (IS_ERR(newcrush)) + return ERR_PTR(PTR_ERR(newcrush)); + } + + /* new flags? */ + if (new_flags >= 0) + map->flags = new_flags; + + ceph_decode_need(p, end, 5*sizeof(u32), bad); + + /* new max? */ + ceph_decode_32(p, max); + if (max >= 0) { + err = osdmap_set_max_osd(map, max); + if (err < 0) + goto bad; + } + + map->epoch++; + map->modified = map->modified; + if (newcrush) { + if (map->crush) + crush_destroy(map->crush); + map->crush = newcrush; + newcrush = NULL; + } + + /* new_pool */ + ceph_decode_32_safe(p, end, len, bad); + while (len--) { + ceph_decode_32_safe(p, end, pool, bad); + if (pool >= map->num_pools) { + void *pg_pool = kcalloc(pool + 1, + sizeof(*map->pg_pool), + GFP_NOFS); + if (!pg_pool) { + err = -ENOMEM; + goto bad; + } + memcpy(pg_pool, map->pg_pool, + map->num_pools * sizeof(*map->pg_pool)); + kfree(map->pg_pool); + map->pg_pool = pg_pool; + map->num_pools = pool+1; + } + ceph_decode_copy(p, &map->pg_pool[pool].v, + sizeof(map->pg_pool->v)); + calc_pg_masks(&map->pg_pool[pool]); + } + + /* old_pool (ignore) */ + ceph_decode_32_safe(p, end, len, bad); + *p += len * sizeof(u32); + + /* new_up */ + err = -EINVAL; + ceph_decode_32_safe(p, end, len, bad); + while (len--) { + u32 osd; + struct ceph_entity_addr addr; + ceph_decode_32_safe(p, end, osd, bad); + ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad); + pr_info("osd%d up\n", osd); + BUG_ON(osd >= map->max_osd); + map->osd_state[osd] |= CEPH_OSD_UP; + map->osd_addr[osd] = addr; + } + + /* new_down */ + ceph_decode_32_safe(p, end, len, bad); + while (len--) { + u32 osd; + ceph_decode_32_safe(p, end, osd, bad); + (*p)++; /* clean flag */ + pr_info("ceph osd%d down\n", osd); + if (osd < map->max_osd) + map->osd_state[osd] &= ~CEPH_OSD_UP; + } + + /* new_weight */ + ceph_decode_32_safe(p, end, len, bad); + while (len--) { + u32 osd, off; + ceph_decode_need(p, end, sizeof(u32)*2, bad); + ceph_decode_32(p, osd); + ceph_decode_32(p, off); + pr_info("osd%d weight 0x%x %s\n", osd, off, + off == CEPH_OSD_IN ? "(in)" : + (off == CEPH_OSD_OUT ? 
"(out)" : "")); + if (osd < map->max_osd) + map->osd_weight[osd] = off; + } + + /* new_pg_temp */ + rbp = rb_first(&map->pg_temp); + ceph_decode_32_safe(p, end, len, bad); + while (len--) { + struct ceph_pg_mapping *pg; + int j; + u64 pgid; + u32 pglen; + ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad); + ceph_decode_64(p, pgid); + ceph_decode_32(p, pglen); + + /* remove any? */ + while (rbp && rb_entry(rbp, struct ceph_pg_mapping, + node)->pgid <= pgid) { + struct rb_node *cur = rbp; + rbp = rb_next(rbp); + dout(" removed pg_temp %llx\n", + rb_entry(cur, struct ceph_pg_mapping, node)->pgid); + rb_erase(cur, &map->pg_temp); + } + + if (pglen) { + /* insert */ + ceph_decode_need(p, end, pglen*sizeof(u32), bad); + pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS); + if (!pg) { + err = -ENOMEM; + goto bad; + } + pg->pgid = pgid; + pg->len = pglen; + for (j = 0; j < len; j++) + ceph_decode_32(p, pg->osds[j]); + __insert_pg_mapping(pg, &map->pg_temp); + dout(" added pg_temp %llx len %d\n", pgid, pglen); + } + } + while (rbp) { + struct rb_node *cur = rbp; + rbp = rb_next(rbp); + dout(" removed pg_temp %llx\n", + rb_entry(cur, struct ceph_pg_mapping, node)->pgid); + rb_erase(cur, &map->pg_temp); + } + + /* ignore the rest */ + *p = end; + return map; + +bad: + pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n", + epoch, (int)(*p - start), *p, start, end); + if (newcrush) + crush_destroy(newcrush); + return ERR_PTR(err); +} + + + + +/* + * calculate file layout from given offset, length. + * fill in correct oid, logical length, and object extent + * offset, length. + * + * for now, we write only a single su, until we can + * pass a stride back to the caller. + */ +void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, + u64 off, u64 *plen, + u64 *bno, + u64 *oxoff, u64 *oxlen) +{ + u32 osize = le32_to_cpu(layout->fl_object_size); + u32 su = le32_to_cpu(layout->fl_stripe_unit); + u32 sc = le32_to_cpu(layout->fl_stripe_count); + u32 bl, stripeno, stripepos, objsetno; + u32 su_per_object; + u64 t; + + dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen, + osize, su); + su_per_object = osize / le32_to_cpu(layout->fl_stripe_unit); + dout("osize %u / su %u = su_per_object %u\n", osize, su, + su_per_object); + + BUG_ON((su & ~PAGE_MASK) != 0); + /* bl = *off / su; */ + t = off; + do_div(t, su); + bl = t; + dout("off %llu / su %u = bl %u\n", off, su, bl); + + stripeno = bl / sc; + stripepos = bl % sc; + objsetno = stripeno / su_per_object; + + *bno = objsetno * sc + stripepos; + dout("objset %u * sc %u = bno %u\n", objsetno, sc, (unsigned)*bno); + /* *oxoff = *off / layout->fl_stripe_unit; */ + t = off; + *oxoff = do_div(t, su); + *oxlen = min_t(u64, *plen, su - *oxoff); + *plen = *oxlen; + + dout(" obj extent %llu~%llu\n", *oxoff, *oxlen); +} + +/* + * calculate an object layout (i.e. 
pgid) from an oid, + * file_layout, and osdmap + */ +int ceph_calc_object_layout(struct ceph_object_layout *ol, + const char *oid, + struct ceph_file_layout *fl, + struct ceph_osdmap *osdmap) +{ + unsigned num, num_mask; + union ceph_pg pgid; + s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred); + int poolid = le32_to_cpu(fl->fl_pg_pool); + struct ceph_pg_pool_info *pool; + + if (poolid >= osdmap->num_pools) + return -EIO; + pool = &osdmap->pg_pool[poolid]; + + if (preferred >= 0) { + num = le32_to_cpu(pool->v.lpg_num); + num_mask = pool->lpg_num_mask; + } else { + num = le32_to_cpu(pool->v.pg_num); + num_mask = pool->pg_num_mask; + } + + pgid.pg64 = 0; /* start with it zeroed out */ + pgid.pg.ps = ceph_full_name_hash(oid, strlen(oid)); + pgid.pg.preferred = preferred; + pgid.pg.pool = le32_to_cpu(fl->fl_pg_pool); + if (preferred >= 0) + dout("calc_object_layout '%s' pgid %d.%xp%d (%llx)\n", oid, + pgid.pg.pool, pgid.pg.ps, (int)preferred, pgid.pg64); + else + dout("calc_object_layout '%s' pgid %d.%x (%llx)\n", oid, + pgid.pg.pool, pgid.pg.ps, pgid.pg64); + + ol->ol_pgid = cpu_to_le64(pgid.pg64); + ol->ol_stripe_unit = fl->fl_object_stripe_unit; + + return 0; +} + +/* + * Calculate raw osd vector for the given pgid. Return pointer to osd + * array, or NULL on failure. + */ +static int *calc_pg_raw(struct ceph_osdmap *osdmap, union ceph_pg pgid, + int *osds, int *num) +{ + struct rb_node *n = osdmap->pg_temp.rb_node; + struct ceph_pg_mapping *pg; + struct ceph_pg_pool_info *pool; + int ruleno; + unsigned pps; /* placement ps */ + + /* pg_temp? */ + while (n) { + pg = rb_entry(n, struct ceph_pg_mapping, node); + if (pgid.pg64 < pg->pgid) + n = n->rb_left; + else if (pgid.pg64 > pg->pgid) + n = n->rb_right; + else { + *num = pg->len; + return pg->osds; + } + } + + /* crush */ + if (pgid.pg.pool >= osdmap->num_pools) + return NULL; + pool = &osdmap->pg_pool[pgid.pg.pool]; + ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset, + pool->v.type, pool->v.size); + if (ruleno < 0) { + pr_err("no crush rule pool %d type %d size %d\n", + pgid.pg.pool, pool->v.type, pool->v.size); + return NULL; + } + + if (pgid.pg.preferred >= 0) + pps = ceph_stable_mod(pgid.pg.ps, + le32_to_cpu(pool->v.lpgp_num), + pool->lpgp_num_mask); + else + pps = ceph_stable_mod(pgid.pg.ps, + le32_to_cpu(pool->v.pgp_num), + pool->pgp_num_mask); + pps += pgid.pg.pool; + *num = crush_do_rule(osdmap->crush, ruleno, pps, osds, + min_t(int, pool->v.size, *num), + pgid.pg.preferred, osdmap->osd_weight); + return osds; +} + +/* + * Return primary osd for given pgid, or -1 if none. + */ +int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, union ceph_pg pgid) +{ + int rawosds[10], *osds; + int i, num = ARRAY_SIZE(rawosds); + + osds = calc_pg_raw(osdmap, pgid, rawosds, &num); + if (!osds) + return -1; + + /* primary is first up osd */ + for (i = 0; i < num; i++) + if (ceph_osd_is_up(osdmap, osds[i])) { + return osds[i]; + break; + } + return -1; +} diff --git a/fs/ceph/osdmap.h b/fs/ceph/osdmap.h new file mode 100644 index 000000000000..07127c6fb134 --- /dev/null +++ b/fs/ceph/osdmap.h @@ -0,0 +1,123 @@ +#ifndef _FS_CEPH_OSDMAP_H +#define _FS_CEPH_OSDMAP_H + +#include +#include "types.h" +#include "ceph_fs.h" +#include "crush/crush.h" + +/* + * The osd map describes the current membership of the osd cluster and + * specifies the mapping of objects to placement groups and placement + * groups to (sets of) osds. 
That is, it completely specifies the + * (desired) distribution of all data objects in the system at some + * point in time. + * + * Each map version is identified by an epoch, which increases monotonically. + * + * The map can be updated either via an incremental map (diff) describing + * the change between two successive epochs, or as a fully encoded map. + */ +struct ceph_pg_pool_info { + struct ceph_pg_pool v; + int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask; +}; + +struct ceph_pg_mapping { + struct rb_node node; + u64 pgid; + int len; + int osds[]; +}; + +struct ceph_osdmap { + struct ceph_fsid fsid; + u32 epoch; + u32 mkfs_epoch; + struct ceph_timespec created, modified; + + u32 flags; /* CEPH_OSDMAP_* */ + + u32 max_osd; /* size of osd_state, _offload, _addr arrays */ + u8 *osd_state; /* CEPH_OSD_* */ + u32 *osd_weight; /* 0 = failed, 0x10000 = 100% normal */ + struct ceph_entity_addr *osd_addr; + + struct rb_root pg_temp; + + u32 num_pools; + struct ceph_pg_pool_info *pg_pool; + + /* the CRUSH map specifies the mapping of placement groups to + * the list of osds that store+replicate them. */ + struct crush_map *crush; +}; + +/* + * file layout helpers + */ +#define ceph_file_layout_su(l) ((__s32)le32_to_cpu((l).fl_stripe_unit)) +#define ceph_file_layout_stripe_count(l) \ + ((__s32)le32_to_cpu((l).fl_stripe_count)) +#define ceph_file_layout_object_size(l) ((__s32)le32_to_cpu((l).fl_object_size)) +#define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash)) +#define ceph_file_layout_object_su(l) \ + ((__s32)le32_to_cpu((l).fl_object_stripe_unit)) +#define ceph_file_layout_pg_preferred(l) \ + ((__s32)le32_to_cpu((l).fl_pg_preferred)) +#define ceph_file_layout_pg_pool(l) \ + ((__s32)le32_to_cpu((l).fl_pg_pool)) + +static inline unsigned ceph_file_layout_stripe_width(struct ceph_file_layout *l) +{ + return le32_to_cpu(l->fl_stripe_unit) * + le32_to_cpu(l->fl_stripe_count); +} + +/* "period" == bytes before i start on a new set of objects */ +static inline unsigned ceph_file_layout_period(struct ceph_file_layout *l) +{ + return le32_to_cpu(l->fl_object_size) * + le32_to_cpu(l->fl_stripe_count); +} + + +static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd) +{ + return (osd < map->max_osd) && (map->osd_state[osd] & CEPH_OSD_UP); +} + +static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag) +{ + return map && (map->flags & flag); +} + +extern char *ceph_osdmap_state_str(char *str, int len, int state); + +static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map, + int osd) +{ + if (osd >= map->max_osd) + return NULL; + return &map->osd_addr[osd]; +} + +extern struct ceph_osdmap *osdmap_decode(void **p, void *end); +extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, + struct ceph_osdmap *map, + struct ceph_messenger *msgr); +extern void ceph_osdmap_destroy(struct ceph_osdmap *map); + +/* calculate mapping of a file extent to an object */ +extern void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, + u64 off, u64 *plen, + u64 *bno, u64 *oxoff, u64 *oxlen); + +/* calculate mapping of object to a placement group */ +extern int ceph_calc_object_layout(struct ceph_object_layout *ol, + const char *oid, + struct ceph_file_layout *fl, + struct ceph_osdmap *osdmap); +extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, union ceph_pg pgid); + +#endif -- cgit v1.2.3 From 5ecc0a0f8128b1876e8614638deaed49cc8b174c Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:11 -0700 
Subject: ceph: CRUSH mapping algorithm CRUSH is a pseudorandom data distribution function designed to map inputs onto a dynamic hierarchy of devices, while minimizing the extent to which inputs are remapped when the devices are added or removed. It includes some features that are specifically useful for storage, most notably the ability to map each input onto a set of N devices that are separated across administrator-defined failure domains. CRUSH is used to distribute data across the cluster of Ceph storage nodes. More information about CRUSH can be found in this paper: http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf Signed-off-by: Sage Weil --- fs/ceph/crush/crush.c | 140 ++++++++++++ fs/ceph/crush/crush.h | 188 ++++++++++++++++ fs/ceph/crush/hash.h | 90 ++++++++ fs/ceph/crush/mapper.c | 589 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/crush/mapper.h | 20 ++ 5 files changed, 1027 insertions(+) create mode 100644 fs/ceph/crush/crush.c create mode 100644 fs/ceph/crush/crush.h create mode 100644 fs/ceph/crush/hash.h create mode 100644 fs/ceph/crush/mapper.c create mode 100644 fs/ceph/crush/mapper.h diff --git a/fs/ceph/crush/crush.c b/fs/ceph/crush/crush.c new file mode 100644 index 000000000000..13755cdc4fb3 --- /dev/null +++ b/fs/ceph/crush/crush.c @@ -0,0 +1,140 @@ + +#ifdef __KERNEL__ +# include +#else +# include +# include +# define kfree(x) do { if (x) free(x); } while (0) +# define BUG_ON(x) assert(!(x)) +#endif + +#include "crush.h" + +/** + * crush_get_bucket_item_weight - Get weight of an item in given bucket + * @b: bucket pointer + * @p: item index in bucket + */ +int crush_get_bucket_item_weight(struct crush_bucket *b, int p) +{ + if (p >= b->size) + return 0; + + switch (b->alg) { + case CRUSH_BUCKET_UNIFORM: + return ((struct crush_bucket_uniform *)b)->item_weight; + case CRUSH_BUCKET_LIST: + return ((struct crush_bucket_list *)b)->item_weights[p]; + case CRUSH_BUCKET_TREE: + if (p & 1) + return ((struct crush_bucket_tree *)b)->node_weights[p]; + return 0; + case CRUSH_BUCKET_STRAW: + return ((struct crush_bucket_straw *)b)->item_weights[p]; + } + return 0; +} + +/** + * crush_calc_parents - Calculate parent vectors for the given crush map. 
+ * @map: crush_map pointer + */ +void crush_calc_parents(struct crush_map *map) +{ + int i, b, c; + + for (b = 0; b < map->max_buckets; b++) { + if (map->buckets[b] == NULL) + continue; + for (i = 0; i < map->buckets[b]->size; i++) { + c = map->buckets[b]->items[i]; + BUG_ON(c >= map->max_devices || + c < -map->max_buckets); + if (c >= 0) + map->device_parents[c] = map->buckets[b]->id; + else + map->bucket_parents[-1-c] = map->buckets[b]->id; + } + } +} + +void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b) +{ + kfree(b->h.perm); + kfree(b->h.items); + kfree(b); +} + +void crush_destroy_bucket_list(struct crush_bucket_list *b) +{ + kfree(b->item_weights); + kfree(b->sum_weights); + kfree(b->h.perm); + kfree(b->h.items); + kfree(b); +} + +void crush_destroy_bucket_tree(struct crush_bucket_tree *b) +{ + kfree(b->node_weights); + kfree(b); +} + +void crush_destroy_bucket_straw(struct crush_bucket_straw *b) +{ + kfree(b->straws); + kfree(b->item_weights); + kfree(b->h.perm); + kfree(b->h.items); + kfree(b); +} + +void crush_destroy_bucket(struct crush_bucket *b) +{ + switch (b->alg) { + case CRUSH_BUCKET_UNIFORM: + crush_destroy_bucket_uniform((struct crush_bucket_uniform *)b); + break; + case CRUSH_BUCKET_LIST: + crush_destroy_bucket_list((struct crush_bucket_list *)b); + break; + case CRUSH_BUCKET_TREE: + crush_destroy_bucket_tree((struct crush_bucket_tree *)b); + break; + case CRUSH_BUCKET_STRAW: + crush_destroy_bucket_straw((struct crush_bucket_straw *)b); + break; + } +} + +/** + * crush_destroy - Destroy a crush_map + * @map: crush_map pointer + */ +void crush_destroy(struct crush_map *map) +{ + int b; + + /* buckets */ + if (map->buckets) { + for (b = 0; b < map->max_buckets; b++) { + if (map->buckets[b] == NULL) + continue; + crush_destroy_bucket(map->buckets[b]); + } + kfree(map->buckets); + } + + /* rules */ + if (map->rules) { + for (b = 0; b < map->max_rules; b++) + kfree(map->rules[b]); + kfree(map->rules); + } + + kfree(map->bucket_parents); + kfree(map->device_parents); + kfree(map); +} + + diff --git a/fs/ceph/crush/crush.h b/fs/ceph/crush/crush.h new file mode 100644 index 000000000000..9ac7e091126f --- /dev/null +++ b/fs/ceph/crush/crush.h @@ -0,0 +1,188 @@ +#ifndef _CRUSH_CRUSH_H +#define _CRUSH_CRUSH_H + +#include + +/* + * CRUSH is a pseudo-random data distribution algorithm that + * efficiently distributes input values (typically, data objects) + * across a heterogeneous, structured storage cluster. + * + * The algorithm was originally described in detail in this paper + * (although the algorithm has evolved somewhat since then): + * + * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf + * + * LGPL2 + */ + + +#define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */ + + +#define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */ +#define CRUSH_MAX_SET 10 /* max size of a mapping result */ + + +/* + * CRUSH uses user-defined "rules" to describe how inputs should be + * mapped to devices. A rule consists of sequence of steps to perform + * to generate the set of output devices. 
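 *
 * As an illustration (not taken from a real map): a rule that starts at
 * a root bucket with id -1, picks result_max distinct leaves separated
 * across buckets of a "host" type (type id 1 here is an assumption),
 * and emits the result would consist of the three steps
 *
 *	{ CRUSH_RULE_TAKE,               -1, 0 },
 *	{ CRUSH_RULE_CHOOSE_LEAF_FIRSTN,  0, 1 },
 *	{ CRUSH_RULE_EMIT,                0, 0 }
 *
 * using the op codes defined below; a choose arg1 of 0 (CRUSH_CHOOSE_N)
 * means "as many items as the result_max passed to crush_do_rule".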
+ */ +struct crush_rule_step { + __u32 op; + __s32 arg1; + __s32 arg2; +}; + +/* step op codes */ +enum { + CRUSH_RULE_NOOP = 0, + CRUSH_RULE_TAKE = 1, /* arg1 = value to start with */ + CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */ + /* arg2 = type */ + CRUSH_RULE_CHOOSE_INDEP = 3, /* same */ + CRUSH_RULE_EMIT = 4, /* no args */ + CRUSH_RULE_CHOOSE_LEAF_FIRSTN = 6, + CRUSH_RULE_CHOOSE_LEAF_INDEP = 7, +}; + +/* + * for specifying choose num (arg1) relative to the max parameter + * passed to do_rule + */ +#define CRUSH_CHOOSE_N 0 +#define CRUSH_CHOOSE_N_MINUS(x) (-(x)) + +/* + * The rule mask is used to describe what the rule is intended for. + * Given a ruleset and size of output set, we search through the + * rule list for a matching rule_mask. + */ +struct crush_rule_mask { + __u8 ruleset; + __u8 type; + __u8 min_size; + __u8 max_size; +}; + +struct crush_rule { + __u32 len; + struct crush_rule_mask mask; + struct crush_rule_step steps[0]; +}; + +#define crush_rule_size(len) (sizeof(struct crush_rule) + \ + (len)*sizeof(struct crush_rule_step)) + + + +/* + * A bucket is a named container of other items (either devices or + * other buckets). Items within a bucket are chosen using one of a + * few different algorithms. The table summarizes how the speed of + * each option measures up against mapping stability when items are + * added or removed. + * + * Bucket Alg Speed Additions Removals + * ------------------------------------------------ + * uniform O(1) poor poor + * list O(n) optimal poor + * tree O(log n) good good + * straw O(n) optimal optimal + */ +enum { + CRUSH_BUCKET_UNIFORM = 1, + CRUSH_BUCKET_LIST = 2, + CRUSH_BUCKET_TREE = 3, + CRUSH_BUCKET_STRAW = 4 +}; +static inline const char *crush_bucket_alg_name(int alg) +{ + switch (alg) { + case CRUSH_BUCKET_UNIFORM: return "uniform"; + case CRUSH_BUCKET_LIST: return "list"; + case CRUSH_BUCKET_TREE: return "tree"; + case CRUSH_BUCKET_STRAW: return "straw"; + default: return "unknown"; + } +} + +struct crush_bucket { + __s32 id; /* this'll be negative */ + __u16 type; /* non-zero; type=0 is reserved for devices */ + __u16 alg; /* one of CRUSH_BUCKET_* */ + __u32 weight; /* 16-bit fixed point */ + __u32 size; /* num items */ + __s32 *items; + + /* + * cached random permutation: used for uniform bucket and for + * the linear search fallback for the other bucket types. + */ + __u32 perm_x; /* @x for which *perm is defined */ + __u32 perm_n; /* num elements of *perm that are permuted/defined */ + __u32 *perm; +}; + +struct crush_bucket_uniform { + struct crush_bucket h; + __u32 item_weight; /* 16-bit fixed point; all items equally weighted */ +}; + +struct crush_bucket_list { + struct crush_bucket h; + __u32 *item_weights; /* 16-bit fixed point */ + __u32 *sum_weights; /* 16-bit fixed point. element i is sum + of weights 0..i, inclusive */ +}; + +struct crush_bucket_tree { + struct crush_bucket h; /* note: h.size is _tree_ size, not number of + actual items */ + __u8 num_nodes; + __u32 *node_weights; +}; + +struct crush_bucket_straw { + struct crush_bucket h; + __u32 *item_weights; /* 16-bit fixed point */ + __u32 *straws; /* 16-bit fixed point */ +}; + + + +/* + * CRUSH map includes all buckets, rules, etc. + */ +struct crush_map { + struct crush_bucket **buckets; + struct crush_rule **rules; + + /* + * Parent pointers to identify the parent bucket a device or + * bucket in the hierarchy. 
If an item appears more than + * once, this is the _last_ time it appeared (where buckets + * are processed in bucket id order, from -1 on down to + * -max_buckets. + */ + __u32 *bucket_parents; + __u32 *device_parents; + + __s32 max_buckets; + __u32 max_rules; + __s32 max_devices; +}; + + +/* crush.c */ +extern int crush_get_bucket_item_weight(struct crush_bucket *b, int pos); +extern void crush_calc_parents(struct crush_map *map); +extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b); +extern void crush_destroy_bucket_list(struct crush_bucket_list *b); +extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b); +extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b); +extern void crush_destroy_bucket(struct crush_bucket *b); +extern void crush_destroy(struct crush_map *map); + +#endif diff --git a/fs/ceph/crush/hash.h b/fs/ceph/crush/hash.h new file mode 100644 index 000000000000..42f3312783a1 --- /dev/null +++ b/fs/ceph/crush/hash.h @@ -0,0 +1,90 @@ +#ifndef _CRUSH_HASH_H +#define _CRUSH_HASH_H + +/* + * Robert Jenkins' function for mixing 32-bit values + * http://burtleburtle.net/bob/hash/evahash.html + * a, b = random bits, c = input and output + */ +#define crush_hashmix(a, b, c) do { \ + a = a-b; a = a-c; a = a^(c>>13); \ + b = b-c; b = b-a; b = b^(a<<8); \ + c = c-a; c = c-b; c = c^(b>>13); \ + a = a-b; a = a-c; a = a^(c>>12); \ + b = b-c; b = b-a; b = b^(a<<16); \ + c = c-a; c = c-b; c = c^(b>>5); \ + a = a-b; a = a-c; a = a^(c>>3); \ + b = b-c; b = b-a; b = b^(a<<10); \ + c = c-a; c = c-b; c = c^(b>>15); \ + } while (0) + +#define crush_hash_seed 1315423911 + +static inline __u32 crush_hash32(__u32 a) +{ + __u32 hash = crush_hash_seed ^ a; + __u32 b = a; + __u32 x = 231232; + __u32 y = 1232; + crush_hashmix(b, x, hash); + crush_hashmix(y, a, hash); + return hash; +} + +static inline __u32 crush_hash32_2(__u32 a, __u32 b) +{ + __u32 hash = crush_hash_seed ^ a ^ b; + __u32 x = 231232; + __u32 y = 1232; + crush_hashmix(a, b, hash); + crush_hashmix(x, a, hash); + crush_hashmix(b, y, hash); + return hash; +} + +static inline __u32 crush_hash32_3(__u32 a, __u32 b, __u32 c) +{ + __u32 hash = crush_hash_seed ^ a ^ b ^ c; + __u32 x = 231232; + __u32 y = 1232; + crush_hashmix(a, b, hash); + crush_hashmix(c, x, hash); + crush_hashmix(y, a, hash); + crush_hashmix(b, x, hash); + crush_hashmix(y, c, hash); + return hash; +} + +static inline __u32 crush_hash32_4(__u32 a, __u32 b, __u32 c, + __u32 d) +{ + __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d; + __u32 x = 231232; + __u32 y = 1232; + crush_hashmix(a, b, hash); + crush_hashmix(c, d, hash); + crush_hashmix(a, x, hash); + crush_hashmix(y, b, hash); + crush_hashmix(c, x, hash); + crush_hashmix(y, d, hash); + return hash; +} + +static inline __u32 crush_hash32_5(__u32 a, __u32 b, __u32 c, + __u32 d, __u32 e) +{ + __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d ^ e; + __u32 x = 231232; + __u32 y = 1232; + crush_hashmix(a, b, hash); + crush_hashmix(c, d, hash); + crush_hashmix(e, x, hash); + crush_hashmix(y, a, hash); + crush_hashmix(b, x, hash); + crush_hashmix(y, c, hash); + crush_hashmix(d, x, hash); + crush_hashmix(y, e, hash); + return hash; +} + +#endif diff --git a/fs/ceph/crush/mapper.c b/fs/ceph/crush/mapper.c new file mode 100644 index 000000000000..0f0730c62695 --- /dev/null +++ b/fs/ceph/crush/mapper.c @@ -0,0 +1,589 @@ + +#ifdef __KERNEL__ +# include +# include +# include +# include +# ifndef dprintk +# define dprintk(args...) 
+# endif +#else +# include +# include +# include +# include +# define BUG_ON(x) assert(!(x)) +# define dprintk(args...) /* printf(args) */ +# define kmalloc(x, f) malloc(x) +# define kfree(x) free(x) +#endif + +#include "crush.h" +#include "hash.h" + +/* + * Implement the core CRUSH mapping algorithm. + */ + +/** + * crush_find_rule - find a crush_rule id for a given ruleset, type, and size. + * @map: the crush_map + * @ruleset: the storage ruleset id (user defined) + * @type: storage ruleset type (user defined) + * @size: output set size + */ +int crush_find_rule(struct crush_map *map, int ruleset, int type, int size) +{ + int i; + + for (i = 0; i < map->max_rules; i++) { + if (map->rules[i] && + map->rules[i]->mask.ruleset == ruleset && + map->rules[i]->mask.type == type && + map->rules[i]->mask.min_size <= size && + map->rules[i]->mask.max_size >= size) + return i; + } + return -1; +} + + +/* + * bucket choose methods + * + * For each bucket algorithm, we have a "choose" method that, given a + * crush input @x and replica position (usually, position in output set) @r, + * will produce an item in the bucket. + */ + +/* + * Choose based on a random permutation of the bucket. + * + * We used to use some prime number arithmetic to do this, but it + * wasn't very random, and had some other bad behaviors. Instead, we + * calculate an actual random permutation of the bucket members. + * Since this is expensive, we optimize for the r=0 case, which + * captures the vast majority of calls. + */ +static int bucket_perm_choose(struct crush_bucket *bucket, + int x, int r) +{ + unsigned pr = r % bucket->size; + unsigned i, s; + + /* start a new permutation if @x has changed */ + if (bucket->perm_x != x || bucket->perm_n == 0) { + dprintk("bucket %d new x=%d\n", bucket->id, x); + bucket->perm_x = x; + + /* optimize common r=0 case */ + if (pr == 0) { + s = crush_hash32_3(x, bucket->id, 0) % + bucket->size; + bucket->perm[0] = s; + bucket->perm_n = 0xffff; /* magic value, see below */ + goto out; + } + + for (i = 0; i < bucket->size; i++) + bucket->perm[i] = i; + bucket->perm_n = 0; + } else if (bucket->perm_n == 0xffff) { + /* clean up after the r=0 case above */ + for (i = 1; i < bucket->size; i++) + bucket->perm[i] = i; + bucket->perm[bucket->perm[0]] = 0; + bucket->perm_n = 1; + } + + /* calculate permutation up to pr */ + for (i = 0; i < bucket->perm_n; i++) + dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]); + while (bucket->perm_n <= pr) { + unsigned p = bucket->perm_n; + /* no point in swapping the final entry */ + if (p < bucket->size - 1) { + i = crush_hash32_3(x, bucket->id, p) % + (bucket->size - p); + if (i) { + unsigned t = bucket->perm[p + i]; + bucket->perm[p + i] = bucket->perm[p]; + bucket->perm[p] = t; + } + dprintk(" perm_choose swap %d with %d\n", p, p+i); + } + bucket->perm_n++; + } + for (i = 0; i < bucket->size; i++) + dprintk(" perm_choose %d: %d\n", i, bucket->perm[i]); + + s = bucket->perm[pr]; +out: + dprintk(" perm_choose %d sz=%d x=%d r=%d (%d) s=%d\n", bucket->id, + bucket->size, x, r, pr, s); + return bucket->items[s]; +} + +/* uniform */ +static int bucket_uniform_choose(struct crush_bucket_uniform *bucket, + int x, int r) +{ + return bucket_perm_choose(&bucket->h, x, r); +} + +/* list */ +static int bucket_list_choose(struct crush_bucket_list *bucket, + int x, int r) +{ + int i; + + for (i = bucket->h.size-1; i >= 0; i--) { + __u64 w = crush_hash32_4(x, bucket->h.items[i], r, + bucket->h.id); + w &= 0xffff; + dprintk("list_choose i=%d x=%d r=%d item %d weight 
%x " + "sw %x rand %llx", + i, x, r, bucket->h.items[i], bucket->item_weights[i], + bucket->sum_weights[i], w); + w *= bucket->sum_weights[i]; + w = w >> 16; + /*dprintk(" scaled %llx\n", w);*/ + if (w < bucket->item_weights[i]) + return bucket->h.items[i]; + } + + BUG_ON(1); + return 0; +} + + +/* (binary) tree */ +static int height(int n) +{ + int h = 0; + while ((n & 1) == 0) { + h++; + n = n >> 1; + } + return h; +} + +static int left(int x) +{ + int h = height(x); + return x - (1 << (h-1)); +} + +static int right(int x) +{ + int h = height(x); + return x + (1 << (h-1)); +} + +static int terminal(int x) +{ + return x & 1; +} + +static int bucket_tree_choose(struct crush_bucket_tree *bucket, + int x, int r) +{ + int n, l; + __u32 w; + __u64 t; + + /* start at root */ + n = bucket->num_nodes >> 1; + + while (!terminal(n)) { + /* pick point in [0, w) */ + w = bucket->node_weights[n]; + t = (__u64)crush_hash32_4(x, n, r, bucket->h.id) * (__u64)w; + t = t >> 32; + + /* descend to the left or right? */ + l = left(n); + if (t < bucket->node_weights[l]) + n = l; + else + n = right(n); + } + + return bucket->h.items[n >> 1]; +} + + +/* straw */ + +static int bucket_straw_choose(struct crush_bucket_straw *bucket, + int x, int r) +{ + int i; + int high = 0; + __u64 high_draw = 0; + __u64 draw; + + for (i = 0; i < bucket->h.size; i++) { + draw = crush_hash32_3(x, bucket->h.items[i], r); + draw &= 0xffff; + draw *= bucket->straws[i]; + if (i == 0 || draw > high_draw) { + high = i; + high_draw = draw; + } + } + return bucket->h.items[high]; +} + +static int crush_bucket_choose(struct crush_bucket *in, int x, int r) +{ + dprintk("choose %d x=%d r=%d\n", in->id, x, r); + switch (in->alg) { + case CRUSH_BUCKET_UNIFORM: + return bucket_uniform_choose((struct crush_bucket_uniform *)in, + x, r); + case CRUSH_BUCKET_LIST: + return bucket_list_choose((struct crush_bucket_list *)in, + x, r); + case CRUSH_BUCKET_TREE: + return bucket_tree_choose((struct crush_bucket_tree *)in, + x, r); + case CRUSH_BUCKET_STRAW: + return bucket_straw_choose((struct crush_bucket_straw *)in, + x, r); + default: + BUG_ON(1); +/* return in->items[0] */; + } +} + +/* + * true if device is marked "out" (failed, fully offloaded) + * of the cluster + */ +static int is_out(struct crush_map *map, __u32 *weight, int item, int x) +{ + if (weight[item] >= 0x1000) + return 0; + if (weight[item] == 0) + return 1; + if ((crush_hash32_2(x, item) & 0xffff) < weight[item]) + return 0; + return 1; +} + +/** + * crush_choose - choose numrep distinct items of given type + * @map: the crush_map + * @bucket: the bucket we are choose an item from + * @x: crush input value + * @numrep: the number of items to choose + * @type: the type of item to choose + * @out: pointer to output vector + * @outpos: our position in that vector + * @firstn: true if choosing "first n" items, false if choosing "indep" + * @recurse_to_leaf: true if we want one device under each item of given type + * @out2: second output vector for leaf items (if @recurse_to_leaf) + */ +static int crush_choose(struct crush_map *map, + struct crush_bucket *bucket, + __u32 *weight, + int x, int numrep, int type, + int *out, int outpos, + int firstn, int recurse_to_leaf, + int *out2) +{ + int rep; + int ftotal, flocal; + int retry_descent, retry_bucket, skip_rep; + struct crush_bucket *in = bucket; + int r; + int i; + int item; + int itemtype; + int collide, reject; + const int orig_tries = 5; /* attempts before we fall back to search */ + dprintk("choose bucket %d x %d outpos %d\n", 
bucket->id, x, outpos); + + for (rep = outpos; rep < numrep; rep++) { + /* keep trying until we get a non-out, non-colliding item */ + ftotal = 0; + skip_rep = 0; + do { + retry_descent = 0; + in = bucket; /* initial bucket */ + + /* choose through intervening buckets */ + flocal = 0; + do { + retry_bucket = 0; + r = rep; + if (in->alg == CRUSH_BUCKET_UNIFORM) { + /* be careful */ + if (firstn || numrep >= in->size) + /* r' = r + f_total */ + r += ftotal; + else if (in->size % numrep == 0) + /* r'=r+(n+1)*f_local */ + r += (numrep+1) * + (flocal+ftotal); + else + /* r' = r + n*f_local */ + r += numrep * (flocal+ftotal); + } else { + if (firstn) + /* r' = r + f_total */ + r += ftotal; + else + /* r' = r + n*f_local */ + r += numrep * (flocal+ftotal); + } + + /* bucket choose */ + if (flocal >= (in->size>>1) && + flocal > orig_tries) + item = bucket_perm_choose(in, x, r); + else + item = crush_bucket_choose(in, x, r); + BUG_ON(item >= map->max_devices); + + /* desired type? */ + if (item < 0) + itemtype = map->buckets[-1-item]->type; + else + itemtype = 0; + dprintk(" item %d type %d\n", item, itemtype); + + /* keep going? */ + if (itemtype != type) { + BUG_ON(item >= 0 || + (-1-item) >= map->max_buckets); + in = map->buckets[-1-item]; + continue; + } + + /* collision? */ + collide = 0; + for (i = 0; i < outpos; i++) { + if (out[i] == item) { + collide = 1; + break; + } + } + + if (recurse_to_leaf && + item < 0 && + crush_choose(map, map->buckets[-1-item], + weight, + x, outpos+1, 0, + out2, outpos, + firstn, 0, NULL) <= outpos) { + reject = 1; + } else { + /* out? */ + if (itemtype == 0) + reject = is_out(map, weight, + item, x); + else + reject = 0; + } + + if (reject || collide) { + ftotal++; + flocal++; + + if (collide && flocal < 3) + /* retry locally a few times */ + retry_bucket = 1; + else if (flocal < in->size + orig_tries) + /* exhaustive bucket search */ + retry_bucket = 1; + else if (ftotal < 20) + /* then retry descent */ + retry_descent = 1; + else + /* else give up */ + skip_rep = 1; + dprintk(" reject %d collide %d " + "ftotal %d flocal %d\n", + reject, collide, ftotal, + flocal); + } + } while (retry_bucket); + } while (retry_descent); + + if (skip_rep) { + dprintk("skip rep\n"); + continue; + } + + dprintk("choose got %d\n", item); + out[outpos] = item; + outpos++; + } + + dprintk("choose returns %d\n", outpos); + return outpos; +} + + +/** + * crush_do_rule - calculate a mapping with the given input and rule + * @map: the crush_map + * @ruleno: the rule id + * @x: hash input + * @result: pointer to result vector + * @result_max: maximum result size + * @force: force initial replica choice; -1 for none + */ +int crush_do_rule(struct crush_map *map, + int ruleno, int x, int *result, int result_max, + int force, __u32 *weight) +{ + int result_len; + int force_context[CRUSH_MAX_DEPTH]; + int force_pos = -1; + int a[CRUSH_MAX_SET]; + int b[CRUSH_MAX_SET]; + int c[CRUSH_MAX_SET]; + int recurse_to_leaf; + int *w; + int wsize = 0; + int *o; + int osize; + int *tmp; + struct crush_rule *rule; + int step; + int i, j; + int numrep; + int firstn; + int rc = -1; + + BUG_ON(ruleno >= map->max_rules); + + rule = map->rules[ruleno]; + result_len = 0; + w = a; + o = b; + + /* + * determine hierarchical context of force, if any. note + * that this may or may not correspond to the specific types + * referenced by the crush rule. 
+ */ + if (force >= 0) { + if (force >= map->max_devices || + map->device_parents[force] == 0) { + /*dprintk("CRUSH: forcefed device dne\n");*/ + rc = -1; /* force fed device dne */ + goto out; + } + if (!is_out(map, weight, force, x)) { + while (1) { + force_context[++force_pos] = force; + if (force >= 0) + force = map->device_parents[force]; + else + force = map->bucket_parents[-1-force]; + if (force == 0) + break; + } + } + } + + for (step = 0; step < rule->len; step++) { + firstn = 0; + switch (rule->steps[step].op) { + case CRUSH_RULE_TAKE: + w[0] = rule->steps[step].arg1; + if (force_pos >= 0) { + BUG_ON(force_context[force_pos] != w[0]); + force_pos--; + } + wsize = 1; + break; + + case CRUSH_RULE_CHOOSE_LEAF_FIRSTN: + case CRUSH_RULE_CHOOSE_FIRSTN: + firstn = 1; + case CRUSH_RULE_CHOOSE_LEAF_INDEP: + case CRUSH_RULE_CHOOSE_INDEP: + BUG_ON(wsize == 0); + + recurse_to_leaf = + rule->steps[step].op == + CRUSH_RULE_CHOOSE_LEAF_FIRSTN || + rule->steps[step].op == + CRUSH_RULE_CHOOSE_LEAF_INDEP; + + /* reset output */ + osize = 0; + + for (i = 0; i < wsize; i++) { + /* + * see CRUSH_N, CRUSH_N_MINUS macros. + * basically, numrep <= 0 means relative to + * the provided result_max + */ + numrep = rule->steps[step].arg1; + if (numrep <= 0) { + numrep += result_max; + if (numrep <= 0) + continue; + } + j = 0; + if (osize == 0 && force_pos >= 0) { + /* skip any intermediate types */ + while (force_pos && + force_context[force_pos] < 0 && + rule->steps[step].arg2 != + map->buckets[-1 - + force_context[force_pos]]->type) + force_pos--; + o[osize] = force_context[force_pos]; + if (recurse_to_leaf) + c[osize] = force_context[0]; + j++; + force_pos--; + } + osize += crush_choose(map, + map->buckets[-1-w[i]], + weight, + x, numrep, + rule->steps[step].arg2, + o+osize, j, + firstn, + recurse_to_leaf, c+osize); + } + + if (recurse_to_leaf) + /* copy final _leaf_ values to output set */ + memcpy(o, c, osize*sizeof(*o)); + + /* swap t and w arrays */ + tmp = o; + o = w; + w = tmp; + wsize = osize; + break; + + + case CRUSH_RULE_EMIT: + for (i = 0; i < wsize && result_len < result_max; i++) { + result[result_len] = w[i]; + result_len++; + } + wsize = 0; + break; + + default: + BUG_ON(1); + } + } + rc = result_len; + +out: + return rc; +} + + diff --git a/fs/ceph/crush/mapper.h b/fs/ceph/crush/mapper.h new file mode 100644 index 000000000000..98e90046fd9f --- /dev/null +++ b/fs/ceph/crush/mapper.h @@ -0,0 +1,20 @@ +#ifndef _CRUSH_MAPPER_H +#define _CRUSH_MAPPER_H + +/* + * CRUSH functions for find rules and then mapping an input to an + * output set. + * + * LGPL2 + */ + +#include "crush.h" + +extern int crush_find_rule(struct crush_map *map, int pool, int type, int size); +extern int crush_do_rule(struct crush_map *map, + int ruleno, + int x, int *result, int result_max, + int forcefeed, /* -1 for none */ + __u32 *weights); + +#endif -- cgit v1.2.3 From ba75bb98cfb93b62c54af25bf67ff90857264bbe Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:11 -0700 Subject: ceph: monitor client The monitor cluster is responsible for managing cluster membership and state. The monitor client handles what minimal interaction the Ceph client has with it: checking for updated versions of the MDS and OSD maps, getting statfs() information, and unmounting. 
Signed-off-by: Sage Weil --- fs/ceph/mon_client.c | 694 +++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/mon_client.h | 109 ++++++++ 2 files changed, 803 insertions(+) create mode 100644 fs/ceph/mon_client.c create mode 100644 fs/ceph/mon_client.h diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c new file mode 100644 index 000000000000..b0c95cec5df8 --- /dev/null +++ b/fs/ceph/mon_client.c @@ -0,0 +1,694 @@ +#include "ceph_debug.h" + +#include +#include +#include + +#include "mon_client.h" +#include "super.h" +#include "decode.h" + +/* + * Interact with Ceph monitor cluster. Handle requests for new map + * versions, and periodically resend as needed. Also implement + * statfs() and umount(). + * + * A small cluster of Ceph "monitors" are responsible for managing critical + * cluster configuration and state information. An odd number (e.g., 3, 5) + * of cmon daemons use a modified version of the Paxos part-time parliament + * algorithm to manage the MDS map (mds cluster membership), OSD map, and + * list of clients who have mounted the file system. + * + * We maintain an open, active session with a monitor at all times in order to + * receive timely MDSMap updates. We periodically send a keepalive byte on the + * TCP socket to ensure we detect a failure. If the connection does break, we + * randomly hunt for a new monitor. Once the connection is reestablished, we + * resend any outstanding requests. + */ + +const static struct ceph_connection_operations mon_con_ops; + +/* + * Decode a monmap blob (e.g., during mount). + */ +struct ceph_monmap *ceph_monmap_decode(void *p, void *end) +{ + struct ceph_monmap *m = NULL; + int i, err = -EINVAL; + struct ceph_fsid fsid; + u32 epoch, num_mon; + u16 version; + + dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p)); + + ceph_decode_16_safe(&p, end, version, bad); + + ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad); + ceph_decode_copy(&p, &fsid, sizeof(fsid)); + ceph_decode_32(&p, epoch); + + ceph_decode_32(&p, num_mon); + ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad); + + if (num_mon >= CEPH_MAX_MON) + goto bad; + m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS); + if (m == NULL) + return ERR_PTR(-ENOMEM); + m->fsid = fsid; + m->epoch = epoch; + m->num_mon = num_mon; + ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0])); + + if (p != end) + goto bad; + + dout("monmap_decode epoch %d, num_mon %d\n", m->epoch, + m->num_mon); + for (i = 0; i < m->num_mon; i++) + dout("monmap_decode mon%d is %s\n", i, + pr_addr(&m->mon_inst[i].addr.in_addr)); + return m; + +bad: + dout("monmap_decode failed with %d\n", err); + kfree(m); + return ERR_PTR(err); +} + +/* + * return true if *addr is included in the monmap. + */ +int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr) +{ + int i; + + for (i = 0; i < m->num_mon; i++) + if (ceph_entity_addr_equal(addr, &m->mon_inst[i].addr)) + return 1; + return 0; +} + +/* + * Close monitor session, if any. + */ +static void __close_session(struct ceph_mon_client *monc) +{ + if (monc->con) { + dout("__close_session closing mon%d\n", monc->cur_mon); + ceph_con_close(monc->con); + monc->cur_mon = -1; + } +} + +/* + * Open a session with a (new) monitor. 
+ */ +static int __open_session(struct ceph_mon_client *monc) +{ + char r; + + if (monc->cur_mon < 0) { + get_random_bytes(&r, 1); + monc->cur_mon = r % monc->monmap->num_mon; + dout("open_session num=%d r=%d -> mon%d\n", + monc->monmap->num_mon, r, monc->cur_mon); + monc->sub_sent = 0; + monc->sub_renew_after = jiffies; /* i.e., expired */ + monc->want_next_osdmap = !!monc->want_next_osdmap; + + dout("open_session mon%d opening\n", monc->cur_mon); + monc->con->peer_name.type = CEPH_ENTITY_TYPE_MON; + monc->con->peer_name.num = cpu_to_le64(monc->cur_mon); + ceph_con_open(monc->con, + &monc->monmap->mon_inst[monc->cur_mon].addr); + } else { + dout("open_session mon%d already open\n", monc->cur_mon); + } + return 0; +} + +static bool __sub_expired(struct ceph_mon_client *monc) +{ + return time_after_eq(jiffies, monc->sub_renew_after); +} + +/* + * Reschedule delayed work timer. + */ +static void __schedule_delayed(struct ceph_mon_client *monc) +{ + unsigned delay; + + if (monc->cur_mon < 0 || monc->want_mount || __sub_expired(monc)) + delay = 10 * HZ; + else + delay = 20 * HZ; + dout("__schedule_delayed after %u\n", delay); + schedule_delayed_work(&monc->delayed_work, delay); +} + +/* + * Send subscribe request for mdsmap and/or osdmap. + */ +static void __send_subscribe(struct ceph_mon_client *monc) +{ + dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n", + (unsigned)monc->sub_sent, __sub_expired(monc), + monc->want_next_osdmap); + if ((__sub_expired(monc) && !monc->sub_sent) || + monc->want_next_osdmap == 1) { + struct ceph_msg *msg; + struct ceph_mon_subscribe_item *i; + void *p, *end; + + msg = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 64, 0, 0, NULL); + if (!msg) + return; + + p = msg->front.iov_base; + end = p + msg->front.iov_len; + + dout("__send_subscribe to 'mdsmap' %u+\n", + (unsigned)monc->have_mdsmap); + if (monc->want_next_osdmap) { + dout("__send_subscribe to 'osdmap' %u\n", + (unsigned)monc->have_osdmap); + ceph_encode_32(&p, 2); + ceph_encode_string(&p, end, "osdmap", 6); + i = p; + i->have = cpu_to_le64(monc->have_osdmap); + i->onetime = 1; + p += sizeof(*i); + monc->want_next_osdmap = 2; /* requested */ + } else { + ceph_encode_32(&p, 1); + } + ceph_encode_string(&p, end, "mdsmap", 6); + i = p; + i->have = cpu_to_le64(monc->have_mdsmap); + i->onetime = 0; + p += sizeof(*i); + + msg->front.iov_len = p - msg->front.iov_base; + msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); + ceph_con_send(monc->con, msg); + + monc->sub_sent = jiffies | 1; /* never 0 */ + } +} + +static void handle_subscribe_ack(struct ceph_mon_client *monc, + struct ceph_msg *msg) +{ + unsigned seconds; + void *p = msg->front.iov_base; + void *end = p + msg->front.iov_len; + + ceph_decode_32_safe(&p, end, seconds, bad); + mutex_lock(&monc->mutex); + if (monc->hunting) { + pr_info("mon%d %s session established\n", + monc->cur_mon, pr_addr(&monc->con->peer_addr.in_addr)); + monc->hunting = false; + } + dout("handle_subscribe_ack after %d seconds\n", seconds); + monc->sub_renew_after = monc->sub_sent + seconds*HZ - 1; + monc->sub_sent = 0; + mutex_unlock(&monc->mutex); + return; +bad: + pr_err("got corrupt subscribe-ack msg\n"); +} + +/* + * Keep track of which maps we have + */ +int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got) +{ + mutex_lock(&monc->mutex); + monc->have_mdsmap = got; + mutex_unlock(&monc->mutex); + return 0; +} + +int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got) +{ + mutex_lock(&monc->mutex); + monc->have_osdmap = got; + monc->want_next_osdmap = 0; + 
mutex_unlock(&monc->mutex); + return 0; +} + +/* + * Register interest in the next osdmap + */ +void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc) +{ + dout("request_next_osdmap have %u\n", monc->have_osdmap); + mutex_lock(&monc->mutex); + if (!monc->want_next_osdmap) + monc->want_next_osdmap = 1; + if (monc->want_next_osdmap < 2) + __send_subscribe(monc); + mutex_unlock(&monc->mutex); +} + + +/* + * mount + */ +static void __request_mount(struct ceph_mon_client *monc) +{ + struct ceph_msg *msg; + struct ceph_client_mount *h; + int err; + + dout("__request_mount\n"); + err = __open_session(monc); + if (err) + return; + msg = ceph_msg_new(CEPH_MSG_CLIENT_MOUNT, sizeof(*h), 0, 0, NULL); + if (IS_ERR(msg)) + return; + h = msg->front.iov_base; + h->have_version = 0; + ceph_con_send(monc->con, msg); +} + +int ceph_monc_request_mount(struct ceph_mon_client *monc) +{ + if (!monc->con) { + monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL); + if (!monc->con) + return -ENOMEM; + ceph_con_init(monc->client->msgr, monc->con); + monc->con->private = monc; + monc->con->ops = &mon_con_ops; + } + + mutex_lock(&monc->mutex); + __request_mount(monc); + __schedule_delayed(monc); + mutex_unlock(&monc->mutex); + return 0; +} + +/* + * The monitor responds with mount ack indicate mount success. The + * included client ticket allows the client to talk to MDSs and OSDs. + */ +static void handle_mount_ack(struct ceph_mon_client *monc, struct ceph_msg *msg) +{ + struct ceph_client *client = monc->client; + struct ceph_monmap *monmap = NULL, *old = monc->monmap; + void *p, *end; + s32 result; + u32 len; + s64 cnum; + int err = -EINVAL; + + if (client->whoami >= 0) { + dout("handle_mount_ack - already mounted\n"); + return; + } + + mutex_lock(&monc->mutex); + + dout("handle_mount_ack\n"); + p = msg->front.iov_base; + end = p + msg->front.iov_len; + + ceph_decode_64_safe(&p, end, cnum, bad); + ceph_decode_32_safe(&p, end, result, bad); + ceph_decode_32_safe(&p, end, len, bad); + if (result) { + pr_err("mount denied: %.*s (%d)\n", len, (char *)p, + result); + err = result; + goto out; + } + p += len; + + ceph_decode_32_safe(&p, end, len, bad); + ceph_decode_need(&p, end, len, bad); + monmap = ceph_monmap_decode(p, p + len); + if (IS_ERR(monmap)) { + pr_err("problem decoding monmap, %d\n", + (int)PTR_ERR(monmap)); + err = -EINVAL; + goto out; + } + p += len; + + client->monc.monmap = monmap; + kfree(old); + + client->signed_ticket = NULL; + client->signed_ticket_len = 0; + + monc->want_mount = false; + + client->whoami = cnum; + client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT; + client->msgr->inst.name.num = cpu_to_le64(cnum); + pr_info("client%lld fsid " FSID_FORMAT "\n", + client->whoami, PR_FSID(&client->monc.monmap->fsid)); + + ceph_debugfs_client_init(client); + __send_subscribe(monc); + + err = 0; + goto out; + +bad: + pr_err("error decoding mount_ack message\n"); +out: + client->mount_err = err; + mutex_unlock(&monc->mutex); + wake_up(&client->mount_wq); +} + + + + +/* + * statfs + */ +static void handle_statfs_reply(struct ceph_mon_client *monc, + struct ceph_msg *msg) +{ + struct ceph_mon_statfs_request *req; + struct ceph_mon_statfs_reply *reply = msg->front.iov_base; + u64 tid; + + if (msg->front.iov_len != sizeof(*reply)) + goto bad; + tid = le64_to_cpu(reply->tid); + dout("handle_statfs_reply %p tid %llu\n", msg, tid); + + mutex_lock(&monc->mutex); + req = radix_tree_lookup(&monc->statfs_request_tree, tid); + if (req) { + *req->buf = reply->st; + req->result = 0; + } + 
mutex_unlock(&monc->mutex); + if (req) + complete(&req->completion); + return; + +bad: + pr_err("corrupt statfs reply, no tid\n"); +} + +/* + * (re)send a statfs request + */ +static int send_statfs(struct ceph_mon_client *monc, + struct ceph_mon_statfs_request *req) +{ + struct ceph_msg *msg; + struct ceph_mon_statfs *h; + int err; + + dout("send_statfs tid %llu\n", req->tid); + err = __open_session(monc); + if (err) + return err; + msg = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), 0, 0, NULL); + if (IS_ERR(msg)) + return PTR_ERR(msg); + req->request = msg; + h = msg->front.iov_base; + h->have_version = 0; + h->fsid = monc->monmap->fsid; + h->tid = cpu_to_le64(req->tid); + ceph_con_send(monc->con, msg); + return 0; +} + +/* + * Do a synchronous statfs(). + */ +int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf) +{ + struct ceph_mon_statfs_request req; + int err; + + req.buf = buf; + init_completion(&req.completion); + + /* allocate memory for reply */ + err = ceph_msgpool_resv(&monc->msgpool_statfs_reply, 1); + if (err) + return err; + + /* register request */ + mutex_lock(&monc->mutex); + req.tid = ++monc->last_tid; + req.last_attempt = jiffies; + req.delay = BASE_DELAY_INTERVAL; + if (radix_tree_insert(&monc->statfs_request_tree, req.tid, &req) < 0) { + mutex_unlock(&monc->mutex); + pr_err("ENOMEM in do_statfs\n"); + return -ENOMEM; + } + monc->num_statfs_requests++; + mutex_unlock(&monc->mutex); + + /* send request and wait */ + err = send_statfs(monc, &req); + if (!err) + err = wait_for_completion_interruptible(&req.completion); + + mutex_lock(&monc->mutex); + radix_tree_delete(&monc->statfs_request_tree, req.tid); + monc->num_statfs_requests--; + ceph_msgpool_resv(&monc->msgpool_statfs_reply, -1); + mutex_unlock(&monc->mutex); + + if (!err) + err = req.result; + return err; +} + +/* + * Resend pending statfs requests. + */ +static void __resend_statfs(struct ceph_mon_client *monc) +{ + u64 next_tid = 0; + int got; + int did = 0; + struct ceph_mon_statfs_request *req; + + while (1) { + got = radix_tree_gang_lookup(&monc->statfs_request_tree, + (void **)&req, + next_tid, 1); + if (got == 0) + break; + did++; + next_tid = req->tid + 1; + + send_statfs(monc, req); + } +} + +/* + * Delayed work. If we haven't mounted yet, retry. Otherwise, + * renew/retry subscription as needed (in case it is timing out, or we + * got an ENOMEM). And keep the monitor connection alive. 
+ */ +static void delayed_work(struct work_struct *work) +{ + struct ceph_mon_client *monc = + container_of(work, struct ceph_mon_client, delayed_work.work); + + dout("monc delayed_work\n"); + mutex_lock(&monc->mutex); + if (monc->want_mount) { + __request_mount(monc); + } else { + if (__sub_expired(monc)) { + __close_session(monc); + __open_session(monc); /* continue hunting */ + } else { + ceph_con_keepalive(monc->con); + } + } + __send_subscribe(monc); + __schedule_delayed(monc); + mutex_unlock(&monc->mutex); +} + +int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) +{ + int err = 0; + + dout("init\n"); + memset(monc, 0, sizeof(*monc)); + monc->client = cl; + monc->monmap = NULL; + mutex_init(&monc->mutex); + + monc->con = NULL; + + /* msg pools */ + err = ceph_msgpool_init(&monc->msgpool_mount_ack, 4096, 1, false); + if (err < 0) + goto out; + err = ceph_msgpool_init(&monc->msgpool_subscribe_ack, 8, 1, false); + if (err < 0) + goto out; + err = ceph_msgpool_init(&monc->msgpool_statfs_reply, + sizeof(struct ceph_mon_statfs_reply), 0, false); + if (err < 0) + goto out; + + monc->cur_mon = -1; + monc->hunting = false; /* not really */ + monc->sub_renew_after = jiffies; + monc->sub_sent = 0; + + INIT_DELAYED_WORK(&monc->delayed_work, delayed_work); + INIT_RADIX_TREE(&monc->statfs_request_tree, GFP_NOFS); + monc->num_statfs_requests = 0; + monc->last_tid = 0; + + monc->have_mdsmap = 0; + monc->have_osdmap = 0; + monc->want_next_osdmap = 1; + monc->want_mount = true; +out: + return err; +} + +void ceph_monc_stop(struct ceph_mon_client *monc) +{ + dout("stop\n"); + cancel_delayed_work_sync(&monc->delayed_work); + + mutex_lock(&monc->mutex); + __close_session(monc); + if (monc->con) { + monc->con->private = NULL; + monc->con->ops->put(monc->con); + monc->con = NULL; + } + mutex_unlock(&monc->mutex); + + ceph_msgpool_destroy(&monc->msgpool_mount_ack); + ceph_msgpool_destroy(&monc->msgpool_subscribe_ack); + ceph_msgpool_destroy(&monc->msgpool_statfs_reply); + + kfree(monc->monmap); +} + + +/* + * handle incoming message + */ +static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) +{ + struct ceph_mon_client *monc = con->private; + int type = le16_to_cpu(msg->hdr.type); + + if (!monc) + return; + + switch (type) { + case CEPH_MSG_CLIENT_MOUNT_ACK: + handle_mount_ack(monc, msg); + break; + + case CEPH_MSG_MON_SUBSCRIBE_ACK: + handle_subscribe_ack(monc, msg); + break; + + case CEPH_MSG_STATFS_REPLY: + handle_statfs_reply(monc, msg); + break; + + case CEPH_MSG_MDS_MAP: + ceph_mdsc_handle_map(&monc->client->mdsc, msg); + break; + + case CEPH_MSG_OSD_MAP: + ceph_osdc_handle_map(&monc->client->osdc, msg); + break; + + default: + pr_err("received unknown message type %d %s\n", type, + ceph_msg_type_name(type)); + } + ceph_msg_put(msg); +} + +/* + * Allocate memory for incoming message + */ +static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, + struct ceph_msg_header *hdr) +{ + struct ceph_mon_client *monc = con->private; + int type = le16_to_cpu(hdr->type); + + switch (type) { + case CEPH_MSG_CLIENT_MOUNT_ACK: + return ceph_msgpool_get(&monc->msgpool_mount_ack); + case CEPH_MSG_MON_SUBSCRIBE_ACK: + return ceph_msgpool_get(&monc->msgpool_subscribe_ack); + case CEPH_MSG_STATFS_REPLY: + return ceph_msgpool_get(&monc->msgpool_statfs_reply); + } + return ceph_alloc_msg(con, hdr); +} + +/* + * If the monitor connection resets, pick a new monitor and resubmit + * any pending requests. 
+ */ +static void mon_fault(struct ceph_connection *con) +{ + struct ceph_mon_client *monc = con->private; + + if (!monc) + return; + + dout("mon_fault\n"); + mutex_lock(&monc->mutex); + if (!con->private) + goto out; + + if (monc->con && !monc->hunting) + pr_info("mon%d %s session lost, " + "hunting for new mon\n", monc->cur_mon, + pr_addr(&monc->con->peer_addr.in_addr)); + + __close_session(monc); + if (!monc->hunting) { + /* start hunting */ + monc->hunting = true; + if (__open_session(monc) == 0) { + __send_subscribe(monc); + __resend_statfs(monc); + } + } else { + /* already hunting, let's wait a bit */ + __schedule_delayed(monc); + } +out: + mutex_unlock(&monc->mutex); +} + +const static struct ceph_connection_operations mon_con_ops = { + .get = ceph_con_get, + .put = ceph_con_put, + .dispatch = dispatch, + .fault = mon_fault, + .alloc_msg = mon_alloc_msg, + .alloc_middle = ceph_alloc_middle, +}; diff --git a/fs/ceph/mon_client.h b/fs/ceph/mon_client.h new file mode 100644 index 000000000000..5258c5693b03 --- /dev/null +++ b/fs/ceph/mon_client.h @@ -0,0 +1,109 @@ +#ifndef _FS_CEPH_MON_CLIENT_H +#define _FS_CEPH_MON_CLIENT_H + +#include +#include + +#include "messenger.h" +#include "msgpool.h" + +struct ceph_client; +struct ceph_mount_args; + +/* + * The monitor map enumerates the set of all monitors. + */ +struct ceph_monmap { + struct ceph_fsid fsid; + u32 epoch; + u32 num_mon; + struct ceph_entity_inst mon_inst[0]; +}; + +struct ceph_mon_client; +struct ceph_mon_statfs_request; + + +/* + * Generic mechanism for resending monitor requests. + */ +typedef void (*ceph_monc_request_func_t)(struct ceph_mon_client *monc, + int newmon); + +/* a pending monitor request */ +struct ceph_mon_request { + struct ceph_mon_client *monc; + struct delayed_work delayed_work; + unsigned long delay; + ceph_monc_request_func_t do_request; +}; + +/* + * statfs() is done a bit differently because we need to get data back + * to the caller + */ +struct ceph_mon_statfs_request { + u64 tid; + int result; + struct ceph_statfs *buf; + struct completion completion; + unsigned long last_attempt, delay; /* jiffies */ + struct ceph_msg *request; /* original request */ +}; + +struct ceph_mon_client { + struct ceph_client *client; + struct ceph_monmap *monmap; + + struct mutex mutex; + struct delayed_work delayed_work; + + bool hunting; + int cur_mon; /* last monitor i contacted */ + unsigned long sub_sent, sub_renew_after; + struct ceph_connection *con; + + /* msg pools */ + struct ceph_msgpool msgpool_mount_ack; + struct ceph_msgpool msgpool_subscribe_ack; + struct ceph_msgpool msgpool_statfs_reply; + + /* pending statfs requests */ + struct radix_tree_root statfs_request_tree; + int num_statfs_requests; + u64 last_tid; + + /* mds/osd map or mount requests */ + bool want_mount; + int want_next_osdmap; /* 1 = want, 2 = want+asked */ + u32 have_osdmap, have_mdsmap; + + struct dentry *debugfs_file; +}; + +extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end); +extern int ceph_monmap_contains(struct ceph_monmap *m, + struct ceph_entity_addr *addr); + +extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl); +extern void ceph_monc_stop(struct ceph_mon_client *monc); + +/* + * The model here is to indicate that we need a new map of at least + * epoch @want, and also call in when we receive a map. We will + * periodically rerequest the map from the monitor cluster until we + * get what we want. 
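
The map-subscription model described just above boils down to remembering the newest epoch we hold and the smallest epoch that would satisfy us, and re-asking until the two meet. A minimal userspace sketch of that have/want tracking (illustrative names, not the ceph_monc_got_* API):

    #include <stdio.h>

    /* illustrative only: track which map epoch we hold vs. which we still want */
    struct map_want {
        unsigned have;   /* newest epoch received so far */
        unsigned want;   /* smallest epoch that would satisfy us (0 = satisfied) */
    };

    /* caller tells us it needs at least epoch 'epoch' */
    static void request_map(struct map_want *m, unsigned epoch)
    {
        if (epoch > m->have && (m->want == 0 || epoch < m->want))
            m->want = epoch;
    }

    /* called when a map arrives; returns 1 if we should keep asking */
    static int got_map(struct map_want *m, unsigned epoch)
    {
        if (epoch > m->have)
            m->have = epoch;
        if (m->want && m->have >= m->want)
            m->want = 0;
        return m->want != 0;
    }

    int main(void)
    {
        struct map_want osdmap = { .have = 3 };

        request_map(&osdmap, 5);
        printf("still waiting: %d\n", got_map(&osdmap, 4));  /* 1: epoch 4 < 5 */
        printf("still waiting: %d\n", got_map(&osdmap, 6));  /* 0: satisfied   */
        return 0;
    }
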
+ */ +extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 have); +extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have); + +extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc); + +extern int ceph_monc_request_mount(struct ceph_mon_client *monc); + +extern int ceph_monc_do_statfs(struct ceph_mon_client *monc, + struct ceph_statfs *buf); + + + +#endif -- cgit v1.2.3 From a8599bd821d084d04a3290fffae1071624ec00ea Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:12 -0700 Subject: ceph: capability management The Ceph metadata servers control client access to inode metadata and file data by issuing capabilities, granting clients permission to read and/or write both inode field and file data to OSDs (storage nodes). Each capability consists of a set of bits indicating which operations are allowed. If the client holds a *_SHARED cap, the client has a coherent value that can be safely read from the cached inode. In the case of a *_EXCL (exclusive) or FILE_WR capabilities, the client is allowed to change inode attributes (e.g., file size, mtime), note its dirty state in the ceph_cap, and asynchronously flush that metadata change to the MDS. In the event of a conflicting operation (perhaps by another client), the MDS will revoke the conflicting client capabilities. In order for a client to cache an inode, it must hold a capability with at least one MDS server. When inodes are released, release notifications are batched and periodically sent en masse to the MDS cluster to release server state. Signed-off-by: Sage Weil --- fs/ceph/caps.c | 2830 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 2830 insertions(+) create mode 100644 fs/ceph/caps.c diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c new file mode 100644 index 000000000000..5c7d0e9bbb7b --- /dev/null +++ b/fs/ceph/caps.c @@ -0,0 +1,2830 @@ +#include "ceph_debug.h" + +#include +#include +#include +#include +#include + +#include "super.h" +#include "decode.h" +#include "messenger.h" + +/* + * Capability management + * + * The Ceph metadata servers control client access to inode metadata + * and file data by issuing capabilities, granting clients permission + * to read and/or write both inode field and file data to OSDs + * (storage nodes). Each capability consists of a set of bits + * indicating which operations are allowed. + * + * If the client holds a *_SHARED cap, the client has a coherent value + * that can be safely read from the cached inode. + * + * In the case of a *_EXCL (exclusive) or FILE_WR capabilities, the + * client is allowed to change inode attributes (e.g., file size, + * mtime), note its dirty state in the ceph_cap, and asynchronously + * flush that metadata change to the MDS. + * + * In the event of a conflicting operation (perhaps by another + * client), the MDS will revoke the conflicting client capabilities. + * + * In order for a client to cache an inode, it must hold a capability + * with at least one MDS server. When inodes are released, release + * notifications are batched and periodically sent en masse to the MDS + * cluster to release server state. + */ + + +/* + * Generate readable cap strings for debugging output. 
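
The helper that follows renders capability bits as short strings for dout() output: one upper-case letter per class (A for auth, L for link, X for xattr, F for file) followed by lower-case letters for the generic bits granted in that class. A compressed userspace sketch of the same scheme; the bit values below are made up for illustration and are not the ones defined in the shared headers:

    #include <stdio.h>

    /* illustrative generic cap bits (NOT the real header values) */
    #define GSHARED 1
    #define GEXCL   2

    static void print_caps(const char *class_letters, const unsigned *fields, int n)
    {
        for (int i = 0; i < n; i++) {
            if (!fields[i])
                continue;
            putchar(class_letters[i]);
            if (fields[i] & GSHARED)
                putchar('s');
            if (fields[i] & GEXCL)
                putchar('x');
        }
        putchar('\n');
    }

    int main(void)
    {
        /* auth shared, xattr shared+excl, nothing on link or file */
        unsigned fields[4] = { GSHARED, 0, GSHARED | GEXCL, 0 };

        print_caps("ALXF", fields, 4);    /* prints "AsXsx" */
        return 0;
    }
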
+ */ +#define MAX_CAP_STR 20 +static char cap_str[MAX_CAP_STR][40]; +static DEFINE_SPINLOCK(cap_str_lock); +static int last_cap_str; + +static char *gcap_string(char *s, int c) +{ + if (c & CEPH_CAP_GSHARED) + *s++ = 's'; + if (c & CEPH_CAP_GEXCL) + *s++ = 'x'; + if (c & CEPH_CAP_GCACHE) + *s++ = 'c'; + if (c & CEPH_CAP_GRD) + *s++ = 'r'; + if (c & CEPH_CAP_GWR) + *s++ = 'w'; + if (c & CEPH_CAP_GBUFFER) + *s++ = 'b'; + if (c & CEPH_CAP_GLAZYIO) + *s++ = 'l'; + return s; +} + +const char *ceph_cap_string(int caps) +{ + int i; + char *s; + int c; + + spin_lock(&cap_str_lock); + i = last_cap_str++; + if (last_cap_str == MAX_CAP_STR) + last_cap_str = 0; + spin_unlock(&cap_str_lock); + + s = cap_str[i]; + + if (caps & CEPH_CAP_PIN) + *s++ = 'p'; + + c = (caps >> CEPH_CAP_SAUTH) & 3; + if (c) { + *s++ = 'A'; + s = gcap_string(s, c); + } + + c = (caps >> CEPH_CAP_SLINK) & 3; + if (c) { + *s++ = 'L'; + s = gcap_string(s, c); + } + + c = (caps >> CEPH_CAP_SXATTR) & 3; + if (c) { + *s++ = 'X'; + s = gcap_string(s, c); + } + + c = caps >> CEPH_CAP_SFILE; + if (c) { + *s++ = 'F'; + s = gcap_string(s, c); + } + + if (s == cap_str[i]) + *s++ = '-'; + *s = 0; + return cap_str[i]; +} + +/* + * Cap reservations + * + * Maintain a global pool of preallocated struct ceph_caps, referenced + * by struct ceph_caps_reservations. This ensures that we preallocate + * memory needed to successfully process an MDS response. (If an MDS + * sends us cap information and we fail to process it, we will have + * problems due to the client and MDS being out of sync.) + * + * Reservations are 'owned' by a ceph_cap_reservation context. + */ +static spinlock_t caps_list_lock; +static struct list_head caps_list; /* unused (reserved or unreserved) */ +static int caps_total_count; /* total caps allocated */ +static int caps_use_count; /* in use */ +static int caps_reserve_count; /* unused, reserved */ +static int caps_avail_count; /* unused, unreserved */ + +void __init ceph_caps_init(void) +{ + INIT_LIST_HEAD(&caps_list); + spin_lock_init(&caps_list_lock); +} + +void ceph_caps_finalize(void) +{ + struct ceph_cap *cap; + + spin_lock(&caps_list_lock); + while (!list_empty(&caps_list)) { + cap = list_first_entry(&caps_list, struct ceph_cap, caps_item); + list_del(&cap->caps_item); + kmem_cache_free(ceph_cap_cachep, cap); + } + caps_total_count = 0; + caps_avail_count = 0; + caps_use_count = 0; + caps_reserve_count = 0; + spin_unlock(&caps_list_lock); +} + +int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need) +{ + int i; + struct ceph_cap *cap; + int have; + int alloc = 0; + LIST_HEAD(newcaps); + int ret = 0; + + dout("reserve caps ctx=%p need=%d\n", ctx, need); + + /* first reserve any caps that are already allocated */ + spin_lock(&caps_list_lock); + if (caps_avail_count >= need) + have = need; + else + have = caps_avail_count; + caps_avail_count -= have; + caps_reserve_count += have; + BUG_ON(caps_total_count != caps_use_count + caps_reserve_count + + caps_avail_count); + spin_unlock(&caps_list_lock); + + for (i = have; i < need; i++) { + cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS); + if (!cap) { + ret = -ENOMEM; + goto out_alloc_count; + } + list_add(&cap->caps_item, &newcaps); + alloc++; + } + BUG_ON(have + alloc != need); + + spin_lock(&caps_list_lock); + caps_total_count += alloc; + caps_reserve_count += alloc; + list_splice(&newcaps, &caps_list); + + BUG_ON(caps_total_count != caps_use_count + caps_reserve_count + + caps_avail_count); + spin_unlock(&caps_list_lock); + + ctx->count = need; + dout("reserve 
caps ctx=%p %d = %d used + %d resv + %d avail\n", + ctx, caps_total_count, caps_use_count, caps_reserve_count, + caps_avail_count); + return 0; + +out_alloc_count: + /* we didn't manage to reserve as much as we needed */ + pr_warning("reserve caps ctx=%p ENOMEM need=%d got=%d\n", + ctx, need, have); + return ret; +} + +int ceph_unreserve_caps(struct ceph_cap_reservation *ctx) +{ + dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count); + if (ctx->count) { + spin_lock(&caps_list_lock); + BUG_ON(caps_reserve_count < ctx->count); + caps_reserve_count -= ctx->count; + caps_avail_count += ctx->count; + ctx->count = 0; + dout("unreserve caps %d = %d used + %d resv + %d avail\n", + caps_total_count, caps_use_count, caps_reserve_count, + caps_avail_count); + BUG_ON(caps_total_count != caps_use_count + caps_reserve_count + + caps_avail_count); + spin_unlock(&caps_list_lock); + } + return 0; +} + +static struct ceph_cap *get_cap(struct ceph_cap_reservation *ctx) +{ + struct ceph_cap *cap = NULL; + + /* temporary, until we do something about cap import/export */ + if (!ctx) + return kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS); + + spin_lock(&caps_list_lock); + dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n", + ctx, ctx->count, caps_total_count, caps_use_count, + caps_reserve_count, caps_avail_count); + BUG_ON(!ctx->count); + BUG_ON(ctx->count > caps_reserve_count); + BUG_ON(list_empty(&caps_list)); + + ctx->count--; + caps_reserve_count--; + caps_use_count++; + + cap = list_first_entry(&caps_list, struct ceph_cap, caps_item); + list_del(&cap->caps_item); + + BUG_ON(caps_total_count != caps_use_count + caps_reserve_count + + caps_avail_count); + spin_unlock(&caps_list_lock); + return cap; +} + +static void put_cap(struct ceph_cap *cap, + struct ceph_cap_reservation *ctx) +{ + spin_lock(&caps_list_lock); + dout("put_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n", + ctx, ctx ? ctx->count : 0, caps_total_count, caps_use_count, + caps_reserve_count, caps_avail_count); + caps_use_count--; + /* + * Keep some preallocated caps around, at least enough to do a + * readdir (which needs to preallocate lots of them), to avoid + * lots of free/alloc churn. + */ + if (caps_avail_count >= caps_reserve_count + + ceph_client(cap->ci->vfs_inode.i_sb)->mount_args.max_readdir) { + caps_total_count--; + kmem_cache_free(ceph_cap_cachep, cap); + } else { + if (ctx) { + ctx->count++; + caps_reserve_count++; + } else { + caps_avail_count++; + } + list_add(&cap->caps_item, &caps_list); + } + + BUG_ON(caps_total_count != caps_use_count + caps_reserve_count + + caps_avail_count); + spin_unlock(&caps_list_lock); +} + +void ceph_reservation_status(struct ceph_client *client, + int *total, int *avail, int *used, int *reserved) +{ + if (total) + *total = caps_total_count; + if (avail) + *avail = caps_avail_count; + if (used) + *used = caps_use_count; + if (reserved) + *reserved = caps_reserve_count; +} + +/* + * Find ceph_cap for given mds, if any. + * + * Called with i_lock held. + */ +static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds) +{ + struct ceph_cap *cap; + struct rb_node *n = ci->i_caps.rb_node; + + while (n) { + cap = rb_entry(n, struct ceph_cap, ci_node); + if (mds < cap->mds) + n = n->rb_left; + else if (mds > cap->mds) + n = n->rb_right; + else + return cap; + } + return NULL; +} + +/* + * Return id of any MDS with a cap, preferably FILE_WR|WRBUFFER|EXCL, else + * -1. 
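
__ceph_get_cap_mds, defined just below, walks the inode's per-MDS caps, remembers the last MDS seen, and stops early at one issuing FILE_WR, FILE_BUFFER, or FILE_EXCL, since that MDS is the one that has to see flushes. A small sketch of the same selection over a plain array, with invented cap constants:

    #include <stdio.h>

    /* illustrative cap bits, not the kernel's values */
    #define FILE_WR     0x1
    #define FILE_BUFFER 0x2
    #define FILE_EXCL   0x4

    struct cap { int mds; unsigned issued; };

    /* prefer an MDS issuing write-related caps; otherwise any MDS with a cap */
    static int pick_cap_mds(const struct cap *caps, int n)
    {
        int mds = -1;

        for (int i = 0; i < n; i++) {
            mds = caps[i].mds;
            if (caps[i].issued & (FILE_WR | FILE_BUFFER | FILE_EXCL))
                break;
        }
        return mds;
    }

    int main(void)
    {
        struct cap caps[] = {
            { .mds = 0, .issued = 0 },
            { .mds = 2, .issued = FILE_BUFFER },
            { .mds = 5, .issued = 0 },
        };

        printf("picked mds%d\n", pick_cap_mds(caps, 3));   /* mds2 */
        return 0;
    }
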
+ */ +static int __ceph_get_cap_mds(struct ceph_inode_info *ci, u32 *mseq) +{ + struct ceph_cap *cap; + int mds = -1; + struct rb_node *p; + + /* prefer mds with WR|WRBUFFER|EXCL caps */ + for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { + cap = rb_entry(p, struct ceph_cap, ci_node); + mds = cap->mds; + if (mseq) + *mseq = cap->mseq; + if (cap->issued & (CEPH_CAP_FILE_WR | + CEPH_CAP_FILE_BUFFER | + CEPH_CAP_FILE_EXCL)) + break; + } + return mds; +} + +int ceph_get_cap_mds(struct inode *inode) +{ + int mds; + spin_lock(&inode->i_lock); + mds = __ceph_get_cap_mds(ceph_inode(inode), NULL); + spin_unlock(&inode->i_lock); + return mds; +} + +/* + * Called under i_lock. + */ +static void __insert_cap_node(struct ceph_inode_info *ci, + struct ceph_cap *new) +{ + struct rb_node **p = &ci->i_caps.rb_node; + struct rb_node *parent = NULL; + struct ceph_cap *cap = NULL; + + while (*p) { + parent = *p; + cap = rb_entry(parent, struct ceph_cap, ci_node); + if (new->mds < cap->mds) + p = &(*p)->rb_left; + else if (new->mds > cap->mds) + p = &(*p)->rb_right; + else + BUG(); + } + + rb_link_node(&new->ci_node, parent, p); + rb_insert_color(&new->ci_node, &ci->i_caps); +} + +/* + * (re)set cap hold timeouts, which control the delayed release + * of unused caps back to the MDS. Should be called on cap use. + */ +static void __cap_set_timeouts(struct ceph_mds_client *mdsc, + struct ceph_inode_info *ci) +{ + struct ceph_mount_args *ma = &mdsc->client->mount_args; + + ci->i_hold_caps_min = round_jiffies(jiffies + + ma->caps_wanted_delay_min * HZ); + ci->i_hold_caps_max = round_jiffies(jiffies + + ma->caps_wanted_delay_max * HZ); + dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode, + ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies); +} + +/* + * (Re)queue cap at the end of the delayed cap release list. + * + * If I_FLUSH is set, leave the inode at the front of the list. + * + * Caller holds i_lock + * -> we take mdsc->cap_delay_lock + */ +static void __cap_delay_requeue(struct ceph_mds_client *mdsc, + struct ceph_inode_info *ci) +{ + __cap_set_timeouts(mdsc, ci); + dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode, + ci->i_ceph_flags, ci->i_hold_caps_max); + if (!mdsc->stopping) { + spin_lock(&mdsc->cap_delay_lock); + if (!list_empty(&ci->i_cap_delay_list)) { + if (ci->i_ceph_flags & CEPH_I_FLUSH) + goto no_change; + list_del_init(&ci->i_cap_delay_list); + } + list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list); +no_change: + spin_unlock(&mdsc->cap_delay_lock); + } +} + +/* + * Queue an inode for immediate writeback. Mark inode with I_FLUSH, + * indicating we should send a cap message to flush dirty metadata + * asap, and move to the front of the delayed cap list. + */ +static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc, + struct ceph_inode_info *ci) +{ + dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode); + spin_lock(&mdsc->cap_delay_lock); + ci->i_ceph_flags |= CEPH_I_FLUSH; + if (!list_empty(&ci->i_cap_delay_list)) + list_del_init(&ci->i_cap_delay_list); + list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list); + spin_unlock(&mdsc->cap_delay_lock); +} + +/* + * Cancel delayed work on cap. + * + * Caller must hold i_lock. 
+ */ +static void __cap_delay_cancel(struct ceph_mds_client *mdsc, + struct ceph_inode_info *ci) +{ + dout("__cap_delay_cancel %p\n", &ci->vfs_inode); + if (list_empty(&ci->i_cap_delay_list)) + return; + spin_lock(&mdsc->cap_delay_lock); + list_del_init(&ci->i_cap_delay_list); + spin_unlock(&mdsc->cap_delay_lock); +} + +/* + * Common issue checks for add_cap, handle_cap_grant. + */ +static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap, + unsigned issued) +{ + unsigned had = __ceph_caps_issued(ci, NULL); + + /* + * Each time we receive FILE_CACHE anew, we increment + * i_rdcache_gen. + */ + if ((issued & CEPH_CAP_FILE_CACHE) && + (had & CEPH_CAP_FILE_CACHE) == 0) + ci->i_rdcache_gen++; + + /* + * if we are newly issued FILE_SHARED, clear I_COMPLETE; we + * don't know what happened to this directory while we didn't + * have the cap. + */ + if ((issued & CEPH_CAP_FILE_SHARED) && + (had & CEPH_CAP_FILE_SHARED) == 0) { + ci->i_shared_gen++; + if (S_ISDIR(ci->vfs_inode.i_mode)) { + dout(" marking %p NOT complete\n", &ci->vfs_inode); + ci->i_ceph_flags &= ~CEPH_I_COMPLETE; + } + } +} + +/* + * Add a capability under the given MDS session. + * + * Caller should hold session snap_rwsem (read) and s_mutex. + * + * @fmode is the open file mode, if we are opening a file, otherwise + * it is < 0. (This is so we can atomically add the cap and add an + * open file reference to it.) + */ +int ceph_add_cap(struct inode *inode, + struct ceph_mds_session *session, u64 cap_id, + int fmode, unsigned issued, unsigned wanted, + unsigned seq, unsigned mseq, u64 realmino, int flags, + struct ceph_cap_reservation *caps_reservation) +{ + struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_cap *new_cap = NULL; + struct ceph_cap *cap; + int mds = session->s_mds; + int actual_wanted; + + dout("add_cap %p mds%d cap %llx %s seq %d\n", inode, + session->s_mds, cap_id, ceph_cap_string(issued), seq); + + /* + * If we are opening the file, include file mode wanted bits + * in wanted. + */ + if (fmode >= 0) + wanted |= ceph_caps_for_mode(fmode); + +retry: + spin_lock(&inode->i_lock); + cap = __get_cap_for_mds(ci, mds); + if (!cap) { + if (new_cap) { + cap = new_cap; + new_cap = NULL; + } else { + spin_unlock(&inode->i_lock); + new_cap = get_cap(caps_reservation); + if (new_cap == NULL) + return -ENOMEM; + goto retry; + } + + cap->issued = 0; + cap->implemented = 0; + cap->mds = mds; + cap->mds_wanted = 0; + + cap->ci = ci; + __insert_cap_node(ci, cap); + + /* clear out old exporting info? (i.e. 
on cap import) */ + if (ci->i_cap_exporting_mds == mds) { + ci->i_cap_exporting_issued = 0; + ci->i_cap_exporting_mseq = 0; + ci->i_cap_exporting_mds = -1; + } + + /* add to session cap list */ + cap->session = session; + spin_lock(&session->s_cap_lock); + list_add_tail(&cap->session_caps, &session->s_caps); + session->s_nr_caps++; + spin_unlock(&session->s_cap_lock); + } + + if (!ci->i_snap_realm) { + /* + * add this inode to the appropriate snap realm + */ + struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc, + realmino); + if (realm) { + ceph_get_snap_realm(mdsc, realm); + spin_lock(&realm->inodes_with_caps_lock); + ci->i_snap_realm = realm; + list_add(&ci->i_snap_realm_item, + &realm->inodes_with_caps); + spin_unlock(&realm->inodes_with_caps_lock); + } else { + pr_err("ceph_add_cap: couldn't find snap realm %llx\n", + realmino); + } + } + + __check_cap_issue(ci, cap, issued); + + /* + * If we are issued caps we don't want, or the mds' wanted + * value appears to be off, queue a check so we'll release + * later and/or update the mds wanted value. + */ + actual_wanted = __ceph_caps_wanted(ci); + if ((wanted & ~actual_wanted) || + (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) { + dout(" issued %s, mds wanted %s, actual %s, queueing\n", + ceph_cap_string(issued), ceph_cap_string(wanted), + ceph_cap_string(actual_wanted)); + __cap_delay_requeue(mdsc, ci); + } + + if (flags & CEPH_CAP_FLAG_AUTH) + ci->i_auth_cap = cap; + else if (ci->i_auth_cap == cap) + ci->i_auth_cap = NULL; + + dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n", + inode, ceph_vinop(inode), cap, ceph_cap_string(issued), + ceph_cap_string(issued|cap->issued), seq, mds); + cap->cap_id = cap_id; + cap->issued = issued; + cap->implemented |= issued; + cap->mds_wanted |= wanted; + cap->seq = seq; + cap->issue_seq = seq; + cap->mseq = mseq; + cap->gen = session->s_cap_gen; + + if (fmode >= 0) + __ceph_get_fmode(ci, fmode); + spin_unlock(&inode->i_lock); + wake_up(&ci->i_cap_wq); + return 0; +} + +/* + * Return true if cap has not timed out and belongs to the current + * generation of the MDS session (i.e. has not gone 'stale' due to + * us losing touch with the mds). + */ +static int __cap_is_valid(struct ceph_cap *cap) +{ + unsigned long ttl; + u32 gen; + + spin_lock(&cap->session->s_cap_lock); + gen = cap->session->s_cap_gen; + ttl = cap->session->s_cap_ttl; + spin_unlock(&cap->session->s_cap_lock); + + if (cap->gen < gen || time_after_eq(jiffies, ttl)) { + dout("__cap_is_valid %p cap %p issued %s " + "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode, + cap, ceph_cap_string(cap->issued), cap->gen, gen); + return 0; + } + + return 1; +} + +/* + * Return set of valid cap bits issued to us. Note that caps time + * out, and may be invalidated in bulk if the client session times out + * and session->s_cap_gen is bumped. 
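
A cap is only trusted while its session is healthy: __cap_is_valid above compares the cap's generation with the session's current generation and checks the session TTL, so a whole session's worth of caps can be invalidated at once just by bumping s_cap_gen. A userspace sketch of that check, with plain integers standing in for jiffies:

    #include <stdio.h>

    struct session { unsigned gen; long ttl; };       /* ttl: absolute "time"     */
    struct cap     { unsigned gen; };                 /* gen copied at issue time */

    static int cap_is_valid(const struct cap *cap,
                            const struct session *s, long now)
    {
        /* stale if the session moved on to a new generation or timed out */
        return cap->gen >= s->gen && now < s->ttl;
    }

    int main(void)
    {
        struct session s = { .gen = 2, .ttl = 100 };
        struct cap c = { .gen = 1 };

        printf("valid: %d\n", cap_is_valid(&c, &s, 50));  /* 0: old generation   */
        c.gen = 2;
        printf("valid: %d\n", cap_is_valid(&c, &s, 50));  /* 1                   */
        printf("valid: %d\n", cap_is_valid(&c, &s, 150)); /* 0: session timed out */
        return 0;
    }
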
+ */ +int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented) +{ + int have = ci->i_snap_caps; + struct ceph_cap *cap; + struct rb_node *p; + + if (implemented) + *implemented = 0; + for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { + cap = rb_entry(p, struct ceph_cap, ci_node); + if (!__cap_is_valid(cap)) + continue; + dout("__ceph_caps_issued %p cap %p issued %s\n", + &ci->vfs_inode, cap, ceph_cap_string(cap->issued)); + have |= cap->issued; + if (implemented) + *implemented |= cap->implemented; + } + return have; +} + +/* + * Get cap bits issued by caps other than @ocap + */ +int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap) +{ + int have = ci->i_snap_caps; + struct ceph_cap *cap; + struct rb_node *p; + + for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { + cap = rb_entry(p, struct ceph_cap, ci_node); + if (cap == ocap) + continue; + if (!__cap_is_valid(cap)) + continue; + have |= cap->issued; + } + return have; +} + +/* + * Move a cap to the end of the LRU (oldest caps at list head, newest + * at list tail). + */ +static void __touch_cap(struct ceph_cap *cap) +{ + struct ceph_mds_session *s = cap->session; + + dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap, + s->s_mds); + spin_lock(&s->s_cap_lock); + list_move_tail(&cap->session_caps, &s->s_caps); + spin_unlock(&s->s_cap_lock); +} + +/* + * Check if we hold the given mask. If so, move the cap(s) to the + * front of their respective LRUs. (This is the preferred way for + * callers to check for caps they want.) + */ +int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) +{ + struct ceph_cap *cap; + struct rb_node *p; + int have = ci->i_snap_caps; + + if ((have & mask) == mask) { + dout("__ceph_caps_issued_mask %p snap issued %s" + " (mask %s)\n", &ci->vfs_inode, + ceph_cap_string(have), + ceph_cap_string(mask)); + return 1; + } + + for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { + cap = rb_entry(p, struct ceph_cap, ci_node); + if (!__cap_is_valid(cap)) + continue; + if ((cap->issued & mask) == mask) { + dout("__ceph_caps_issued_mask %p cap %p issued %s" + " (mask %s)\n", &ci->vfs_inode, cap, + ceph_cap_string(cap->issued), + ceph_cap_string(mask)); + if (touch) + __touch_cap(cap); + return 1; + } + + /* does a combination of caps satisfy mask? */ + have |= cap->issued; + if ((have & mask) == mask) { + dout("__ceph_caps_issued_mask %p combo issued %s" + " (mask %s)\n", &ci->vfs_inode, + ceph_cap_string(cap->issued), + ceph_cap_string(mask)); + if (touch) { + struct rb_node *q; + + /* touch this + preceeding caps */ + __touch_cap(cap); + for (q = rb_first(&ci->i_caps); q != p; + q = rb_next(q)) { + cap = rb_entry(q, struct ceph_cap, + ci_node); + if (!__cap_is_valid(cap)) + continue; + __touch_cap(cap); + } + } + return 1; + } + } + + return 0; +} + +/* + * Return true if mask caps are currently being revoked by an MDS. 
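
__ceph_caps_issued_mask above accepts a mask either when a single cap covers it outright or when the union of several valid caps does, touching every contributing cap in the session LRU in the combination case. The accumulation itself is just a running OR; a compact sketch:

    #include <stdio.h>

    /* do the issued bits of these (already validated) caps cover 'mask'? */
    static int caps_cover_mask(const unsigned *issued, int n, unsigned mask)
    {
        unsigned have = 0;

        for (int i = 0; i < n; i++) {
            if ((issued[i] & mask) == mask)
                return 1;                 /* one cap is enough on its own */
            have |= issued[i];
            if ((have & mask) == mask)
                return 1;                 /* a combination of caps suffices */
        }
        return 0;
    }

    int main(void)
    {
        unsigned issued[] = { 0x5, 0x2 };         /* illustrative bit patterns */

        printf("%d\n", caps_cover_mask(issued, 2, 0x7));  /* 1: 0x5 | 0x2 */
        printf("%d\n", caps_cover_mask(issued, 2, 0x8));  /* 0            */
        return 0;
    }
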
+ */ +int ceph_caps_revoking(struct ceph_inode_info *ci, int mask) +{ + struct inode *inode = &ci->vfs_inode; + struct ceph_cap *cap; + struct rb_node *p; + int ret = 0; + + spin_lock(&inode->i_lock); + for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { + cap = rb_entry(p, struct ceph_cap, ci_node); + if (__cap_is_valid(cap) && + (cap->implemented & ~cap->issued & mask)) { + ret = 1; + break; + } + } + spin_unlock(&inode->i_lock); + dout("ceph_caps_revoking %p %s = %d\n", inode, + ceph_cap_string(mask), ret); + return ret; +} + +int __ceph_caps_used(struct ceph_inode_info *ci) +{ + int used = 0; + if (ci->i_pin_ref) + used |= CEPH_CAP_PIN; + if (ci->i_rd_ref) + used |= CEPH_CAP_FILE_RD; + if (ci->i_rdcache_ref || ci->i_rdcache_gen) + used |= CEPH_CAP_FILE_CACHE; + if (ci->i_wr_ref) + used |= CEPH_CAP_FILE_WR; + if (ci->i_wrbuffer_ref) + used |= CEPH_CAP_FILE_BUFFER; + return used; +} + +/* + * wanted, by virtue of open file modes + */ +int __ceph_caps_file_wanted(struct ceph_inode_info *ci) +{ + int want = 0; + int mode; + for (mode = 0; mode < 4; mode++) + if (ci->i_nr_by_mode[mode]) + want |= ceph_caps_for_mode(mode); + return want; +} + +/* + * Return caps we have registered with the MDS(s) as 'wanted'. + */ +int __ceph_caps_mds_wanted(struct ceph_inode_info *ci) +{ + struct ceph_cap *cap; + struct rb_node *p; + int mds_wanted = 0; + + for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { + cap = rb_entry(p, struct ceph_cap, ci_node); + if (!__cap_is_valid(cap)) + continue; + mds_wanted |= cap->mds_wanted; + } + return mds_wanted; +} + +/* + * called under i_lock + */ +static int __ceph_is_any_caps(struct ceph_inode_info *ci) +{ + return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0; +} + +/* + * caller should hold i_lock, and session s_mutex. + * returns true if this is the last cap. if so, caller should iput. + */ +void __ceph_remove_cap(struct ceph_cap *cap, + struct ceph_cap_reservation *ctx) +{ + struct ceph_mds_session *session = cap->session; + struct ceph_inode_info *ci = cap->ci; + struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc; + + dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); + + /* remove from session list */ + spin_lock(&session->s_cap_lock); + list_del_init(&cap->session_caps); + session->s_nr_caps--; + spin_unlock(&session->s_cap_lock); + + /* remove from inode list */ + rb_erase(&cap->ci_node, &ci->i_caps); + cap->session = NULL; + if (ci->i_auth_cap == cap) + ci->i_auth_cap = NULL; + + put_cap(cap, ctx); + + if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) { + struct ceph_snap_realm *realm = ci->i_snap_realm; + spin_lock(&realm->inodes_with_caps_lock); + list_del_init(&ci->i_snap_realm_item); + ci->i_snap_realm_counter++; + ci->i_snap_realm = NULL; + spin_unlock(&realm->inodes_with_caps_lock); + ceph_put_snap_realm(mdsc, realm); + } + if (!__ceph_is_any_real_caps(ci)) + __cap_delay_cancel(mdsc, ci); +} + +/* + * Build and send a cap message to the given MDS. + * + * Caller should be holding s_mutex. 
+ */ +static int send_cap_msg(struct ceph_mds_session *session, + u64 ino, u64 cid, int op, + int caps, int wanted, int dirty, + u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq, + u64 size, u64 max_size, + struct timespec *mtime, struct timespec *atime, + u64 time_warp_seq, + uid_t uid, gid_t gid, mode_t mode, + u64 xattr_version, + struct ceph_buffer *xattrs_buf, + u64 follows) +{ + struct ceph_mds_caps *fc; + struct ceph_msg *msg; + + dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s" + " seq %u/%u mseq %u follows %lld size %llu/%llu" + " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op), + cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted), + ceph_cap_string(dirty), + seq, issue_seq, mseq, follows, size, max_size, + xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0); + + msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), 0, 0, NULL); + if (IS_ERR(msg)) + return PTR_ERR(msg); + + fc = msg->front.iov_base; + + memset(fc, 0, sizeof(*fc)); + + fc->cap_id = cpu_to_le64(cid); + fc->op = cpu_to_le32(op); + fc->seq = cpu_to_le32(seq); + fc->client_tid = cpu_to_le64(flush_tid); + fc->issue_seq = cpu_to_le32(issue_seq); + fc->migrate_seq = cpu_to_le32(mseq); + fc->caps = cpu_to_le32(caps); + fc->wanted = cpu_to_le32(wanted); + fc->dirty = cpu_to_le32(dirty); + fc->ino = cpu_to_le64(ino); + fc->snap_follows = cpu_to_le64(follows); + + fc->size = cpu_to_le64(size); + fc->max_size = cpu_to_le64(max_size); + if (mtime) + ceph_encode_timespec(&fc->mtime, mtime); + if (atime) + ceph_encode_timespec(&fc->atime, atime); + fc->time_warp_seq = cpu_to_le32(time_warp_seq); + + fc->uid = cpu_to_le32(uid); + fc->gid = cpu_to_le32(gid); + fc->mode = cpu_to_le32(mode); + + fc->xattr_version = cpu_to_le64(xattr_version); + if (xattrs_buf) { + msg->middle = ceph_buffer_get(xattrs_buf); + fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len); + msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len); + } + + ceph_con_send(&session->s_con, msg); + return 0; +} + +/* + * Queue cap releases when an inode is dropped from our + * cache. 
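
ceph_queue_caps_release, defined just below, appends one fixed-size item per dropped cap to the current release message, bumps the little-endian count in the message head, and moves the message to the "done" list once it holds CEPH_CAPS_PER_RELEASE items, so releases reach the MDS in batches rather than one RPC per inode. A rough sketch of that batching, with an invented batch size:

    #include <stdio.h>

    #define ITEMS_PER_MSG 4        /* invented; stands in for CEPH_CAPS_PER_RELEASE */

    struct release_msg {
        int num;                   /* items appended so far */
        unsigned long long ino[ITEMS_PER_MSG];
    };

    /* append one release; return 1 if the message is now full and should be sent */
    static int queue_release(struct release_msg *msg, unsigned long long ino)
    {
        msg->ino[msg->num++] = ino;
        return msg->num == ITEMS_PER_MSG;
    }

    int main(void)
    {
        struct release_msg msg = { 0 };

        for (unsigned long long ino = 0x1000; ino < 0x1006; ino++)
            if (queue_release(&msg, ino)) {
                printf("sending release batch of %d caps\n", msg.num);
                msg.num = 0;
            }
        printf("%d releases still queued\n", msg.num);
        return 0;
    }
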
+ */ +void ceph_queue_caps_release(struct inode *inode) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + struct rb_node *p; + + spin_lock(&inode->i_lock); + p = rb_first(&ci->i_caps); + while (p) { + struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node); + struct ceph_mds_session *session = cap->session; + struct ceph_msg *msg; + struct ceph_mds_cap_release *head; + struct ceph_mds_cap_item *item; + + spin_lock(&session->s_cap_lock); + BUG_ON(!session->s_num_cap_releases); + msg = list_first_entry(&session->s_cap_releases, + struct ceph_msg, list_head); + + dout(" adding %p release to mds%d msg %p (%d left)\n", + inode, session->s_mds, msg, session->s_num_cap_releases); + + BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE); + head = msg->front.iov_base; + head->num = cpu_to_le32(le32_to_cpu(head->num) + 1); + item = msg->front.iov_base + msg->front.iov_len; + item->ino = cpu_to_le64(ceph_ino(inode)); + item->cap_id = cpu_to_le64(cap->cap_id); + item->migrate_seq = cpu_to_le32(cap->mseq); + item->seq = cpu_to_le32(cap->issue_seq); + + session->s_num_cap_releases--; + + msg->front.iov_len += sizeof(*item); + if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) { + dout(" release msg %p full\n", msg); + list_move_tail(&msg->list_head, + &session->s_cap_releases_done); + } else { + dout(" release msg %p at %d/%d (%d)\n", msg, + (int)le32_to_cpu(head->num), + (int)CEPH_CAPS_PER_RELEASE, + (int)msg->front.iov_len); + } + spin_unlock(&session->s_cap_lock); + p = rb_next(p); + __ceph_remove_cap(cap, NULL); + + } + spin_unlock(&inode->i_lock); +} + +/* + * Send a cap msg on the given inode. Update our caps state, then + * drop i_lock and send the message. + * + * Make note of max_size reported/requested from mds, revoked caps + * that have now been implemented. + * + * Make half-hearted attempt ot to invalidate page cache if we are + * dropping RDCACHE. Note that this will leave behind locked pages + * that we'll then need to deal with elsewhere. + * + * Return non-zero if delayed release, or we experienced an error + * such that the caller should requeue + retry later. + * + * called with i_lock, then drops it. + * caller should hold snap_rwsem (read), s_mutex. + */ +static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, + int op, int used, int want, int retain, int flushing, + unsigned *pflush_tid) + __releases(cap->ci->vfs_inode->i_lock) +{ + struct ceph_inode_info *ci = cap->ci; + struct inode *inode = &ci->vfs_inode; + u64 cap_id = cap->cap_id; + int held = cap->issued | cap->implemented; + int revoking = cap->implemented & ~cap->issued; + int dropping = cap->issued & ~retain; + int keep; + u64 seq, issue_seq, mseq, time_warp_seq, follows; + u64 size, max_size; + struct timespec mtime, atime; + int wake = 0; + mode_t mode; + uid_t uid; + gid_t gid; + struct ceph_mds_session *session; + u64 xattr_version = 0; + int delayed = 0; + u64 flush_tid = 0; + int i; + int ret; + + dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n", + inode, cap, cap->session, + ceph_cap_string(held), ceph_cap_string(held & retain), + ceph_cap_string(revoking)); + BUG_ON((retain & CEPH_CAP_PIN) == 0); + + session = cap->session; + + /* don't release wanted unless we've waited a bit. 
*/ + if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 && + time_before(jiffies, ci->i_hold_caps_min)) { + dout(" delaying issued %s -> %s, wanted %s -> %s on send\n", + ceph_cap_string(cap->issued), + ceph_cap_string(cap->issued & retain), + ceph_cap_string(cap->mds_wanted), + ceph_cap_string(want)); + want |= cap->mds_wanted; + retain |= cap->issued; + delayed = 1; + } + ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH); + + cap->issued &= retain; /* drop bits we don't want */ + if (cap->implemented & ~cap->issued) { + /* + * Wake up any waiters on wanted -> needed transition. + * This is due to the weird transition from buffered + * to sync IO... we need to flush dirty pages _before_ + * allowing sync writes to avoid reordering. + */ + wake = 1; + } + cap->implemented &= cap->issued | used; + cap->mds_wanted = want; + + if (flushing) { + /* + * assign a tid for flush operations so we can avoid + * flush1 -> dirty1 -> flush2 -> flushack1 -> mark + * clean type races. track latest tid for every bit + * so we can handle flush AxFw, flush Fw, and have the + * first ack clean Ax. + */ + flush_tid = ++ci->i_cap_flush_last_tid; + if (pflush_tid) + *pflush_tid = flush_tid; + dout(" cap_flush_tid %d\n", (int)flush_tid); + for (i = 0; i < CEPH_CAP_BITS; i++) + if (flushing & (1 << i)) + ci->i_cap_flush_tid[i] = flush_tid; + } + + keep = cap->implemented; + seq = cap->seq; + issue_seq = cap->issue_seq; + mseq = cap->mseq; + size = inode->i_size; + ci->i_reported_size = size; + max_size = ci->i_wanted_max_size; + ci->i_requested_max_size = max_size; + mtime = inode->i_mtime; + atime = inode->i_atime; + time_warp_seq = ci->i_time_warp_seq; + follows = ci->i_snap_realm->cached_context->seq; + uid = inode->i_uid; + gid = inode->i_gid; + mode = inode->i_mode; + + if (dropping & CEPH_CAP_XATTR_EXCL) { + __ceph_build_xattrs_blob(ci); + xattr_version = ci->i_xattrs.version + 1; + } + + spin_unlock(&inode->i_lock); + + if (dropping & CEPH_CAP_FILE_CACHE) { + /* invalidate what we can */ + dout("invalidating pages on %p\n", inode); + invalidate_mapping_pages(&inode->i_data, 0, -1); + } + + ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id, + op, keep, want, flushing, seq, flush_tid, issue_seq, mseq, + size, max_size, &mtime, &atime, time_warp_seq, + uid, gid, mode, + xattr_version, + (flushing & CEPH_CAP_XATTR_EXCL) ? ci->i_xattrs.blob : NULL, + follows); + if (ret < 0) { + dout("error sending cap msg, must requeue %p\n", inode); + delayed = 1; + } + + if (wake) + wake_up(&ci->i_cap_wq); + + return delayed; +} + +/* + * When a snapshot is taken, clients accumulate dirty metadata on + * inodes with capabilities in ceph_cap_snaps to describe the file + * state at the time the snapshot was taken. This must be flushed + * asynchronously back to the MDS once sync writes complete and dirty + * data is written out. + * + * Called under i_lock. Takes s_mutex as needed. 
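
The flush path above assigns a fresh tid to every cap flush and records that tid for each dirty bit individually, so overlapping flushes ("flush AxFw, flush Fw") can be retired bit by bit as their acks arrive; caps_are_flushed, further down, only declares the inode clean through a tid when no still-flushing bit carries that tid or an older one. A small sketch of the per-bit bookkeeping (userspace, illustrative names):

    #include <stdio.h>

    #define CAP_BITS 8

    static unsigned long long flush_tid[CAP_BITS]; /* latest flush tid per cap bit */
    static unsigned flushing;                      /* bits currently being flushed */
    static unsigned long long last_tid;

    /* start flushing 'bits'; every bit in the set is stamped with the same new tid */
    static unsigned long long start_flush(unsigned bits)
    {
        unsigned long long tid = ++last_tid;

        flushing |= bits;
        for (int i = 0; i < CAP_BITS; i++)
            if (bits & (1u << i))
                flush_tid[i] = tid;
        return tid;
    }

    /* an ack for the flush with tid 'tid' covering 'bits' came back */
    static void finish_flush(unsigned bits, unsigned long long tid)
    {
        for (int i = 0; i < CAP_BITS; i++)
            if ((bits & (1u << i)) && flush_tid[i] == tid)
                flushing &= ~(1u << i);
    }

    /* have all flushes up to and including 'tid' completed? */
    static int flushed_through(unsigned long long tid)
    {
        for (int i = 0; i < CAP_BITS; i++)
            if ((flushing & (1u << i)) && flush_tid[i] <= tid)
                return 0;
        return 1;
    }

    int main(void)
    {
        unsigned long long t1 = start_flush(0x3);   /* flush bits 0 and 1 */
        unsigned long long t2 = start_flush(0x2);   /* re-flush bit 1 with a newer tid */

        finish_flush(0x3, t1);                      /* first ack clears bit 0 only */
        printf("through t1: %d\n", flushed_through(t1));  /* 1                     */
        printf("through t2: %d\n", flushed_through(t2));  /* 0: bit 1 still out    */
        finish_flush(0x2, t2);
        printf("through t2: %d\n", flushed_through(t2));  /* 1                     */
        return 0;
    }
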
+ */ +void __ceph_flush_snaps(struct ceph_inode_info *ci, + struct ceph_mds_session **psession) +{ + struct inode *inode = &ci->vfs_inode; + int mds; + struct ceph_cap_snap *capsnap; + u32 mseq; + struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; + struct ceph_mds_session *session = NULL; /* if session != NULL, we hold + session->s_mutex */ + u64 next_follows = 0; /* keep track of how far we've gotten through the + i_cap_snaps list, and skip these entries next time + around to avoid an infinite loop */ + + if (psession) + session = *psession; + + dout("__flush_snaps %p\n", inode); +retry: + list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { + /* avoid an infinite loop after retry */ + if (capsnap->follows < next_follows) + continue; + /* + * we need to wait for sync writes to complete and for dirty + * pages to be written out. + */ + if (capsnap->dirty_pages || capsnap->writing) + continue; + + /* pick mds, take s_mutex */ + mds = __ceph_get_cap_mds(ci, &mseq); + if (session && session->s_mds != mds) { + dout("oops, wrong session %p mutex\n", session); + mutex_unlock(&session->s_mutex); + ceph_put_mds_session(session); + session = NULL; + } + if (!session) { + spin_unlock(&inode->i_lock); + mutex_lock(&mdsc->mutex); + session = __ceph_lookup_mds_session(mdsc, mds); + mutex_unlock(&mdsc->mutex); + if (session) { + dout("inverting session/ino locks on %p\n", + session); + mutex_lock(&session->s_mutex); + } + /* + * if session == NULL, we raced against a cap + * deletion. retry, and we'll get a better + * @mds value next time. + */ + spin_lock(&inode->i_lock); + goto retry; + } + + capsnap->flush_tid = ++ci->i_cap_flush_last_tid; + atomic_inc(&capsnap->nref); + if (!list_empty(&capsnap->flushing_item)) + list_del_init(&capsnap->flushing_item); + list_add_tail(&capsnap->flushing_item, + &session->s_cap_snaps_flushing); + spin_unlock(&inode->i_lock); + + dout("flush_snaps %p cap_snap %p follows %lld size %llu\n", + inode, capsnap, next_follows, capsnap->size); + send_cap_msg(session, ceph_vino(inode).ino, 0, + CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0, + capsnap->dirty, 0, capsnap->flush_tid, 0, mseq, + capsnap->size, 0, + &capsnap->mtime, &capsnap->atime, + capsnap->time_warp_seq, + capsnap->uid, capsnap->gid, capsnap->mode, + 0, NULL, + capsnap->follows); + + next_follows = capsnap->follows + 1; + ceph_put_cap_snap(capsnap); + + spin_lock(&inode->i_lock); + goto retry; + } + + /* we flushed them all; remove this inode from the queue */ + spin_lock(&mdsc->snap_flush_lock); + list_del_init(&ci->i_snap_flush_item); + spin_unlock(&mdsc->snap_flush_lock); + + if (psession) + *psession = session; + else if (session) { + mutex_unlock(&session->s_mutex); + ceph_put_mds_session(session); + } +} + +static void ceph_flush_snaps(struct ceph_inode_info *ci) +{ + struct inode *inode = &ci->vfs_inode; + + spin_lock(&inode->i_lock); + __ceph_flush_snaps(ci, NULL); + spin_unlock(&inode->i_lock); +} + +/* + * Add dirty inode to the flushing list. Assign a seq number so we + * can wait for caps to flush without starving. 
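
__ceph_flush_snaps above (like ceph_check_caps below) cannot take the session mutex while holding the inode lock, so it drops i_lock, takes s_mutex, re-takes i_lock, and jumps back to re-validate its position, using a follows/mds cursor to avoid redoing work. A stripped-down pthread sketch of that "drop, lock in the right order, retry" shape; the names and the work loop are illustrative only:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t i_lock  = PTHREAD_MUTEX_INITIALIZER; /* taken last  */
    static pthread_mutex_t s_mutex = PTHREAD_MUTEX_INITIALIZER; /* taken first */

    static int next_item;            /* cursor: work already handled */

    static void flush_items(void)
    {
        int have_s_mutex = 0;

        pthread_mutex_lock(&i_lock);
    retry:
        while (next_item < 3) {
            if (!have_s_mutex) {
                if (pthread_mutex_trylock(&s_mutex) != 0) {
                    /*
                     * Wrong order: we hold i_lock but need s_mutex.
                     * Drop i_lock, take the locks in the proper order,
                     * then re-validate everything via the cursor.
                     */
                    pthread_mutex_unlock(&i_lock);
                    pthread_mutex_lock(&s_mutex);
                    have_s_mutex = 1;
                    pthread_mutex_lock(&i_lock);
                    goto retry;
                }
                have_s_mutex = 1;
            }
            printf("flushing item %d\n", next_item++);
        }
        pthread_mutex_unlock(&i_lock);
        if (have_s_mutex)
            pthread_mutex_unlock(&s_mutex);
    }

    int main(void)
    {
        flush_items();
        return 0;
    }
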
+ */ +static void __mark_caps_flushing(struct inode *inode, + struct ceph_mds_session *session) +{ + struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc; + struct ceph_inode_info *ci = ceph_inode(inode); + + BUG_ON(list_empty(&ci->i_dirty_item)); + spin_lock(&mdsc->cap_dirty_lock); + if (list_empty(&ci->i_flushing_item)) { + list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing); + mdsc->num_cap_flushing++; + ci->i_cap_flush_seq = ++mdsc->cap_flush_seq; + dout(" inode %p now flushing seq %lld\n", &ci->vfs_inode, + ci->i_cap_flush_seq); + } + spin_unlock(&mdsc->cap_dirty_lock); +} + +/* + * Swiss army knife function to examine currently used and wanted + * versus held caps. Release, flush, ack revoked caps to mds as + * appropriate. + * + * CHECK_CAPS_NODELAY - caller is delayed work and we should not delay + * cap release further. + * CHECK_CAPS_AUTHONLY - we should only check the auth cap + * CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without + * further delay. + */ +void ceph_check_caps(struct ceph_inode_info *ci, int flags, + struct ceph_mds_session *session) +{ + struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode); + struct ceph_mds_client *mdsc = &client->mdsc; + struct inode *inode = &ci->vfs_inode; + struct ceph_cap *cap; + int file_wanted, used; + int took_snap_rwsem = 0; /* true if mdsc->snap_rwsem held */ + int drop_session_lock = session ? 0 : 1; + int want, retain, revoking, flushing = 0; + int mds = -1; /* keep track of how far we've gone through i_caps list + to avoid an infinite loop on retry */ + struct rb_node *p; + int tried_invalidate = 0; + int delayed = 0, sent = 0, force_requeue = 0, num; + int is_delayed = flags & CHECK_CAPS_NODELAY; + + /* if we are unmounting, flush any unused caps immediately. */ + if (mdsc->stopping) + is_delayed = 1; + + spin_lock(&inode->i_lock); + + if (ci->i_ceph_flags & CEPH_I_FLUSH) + flags |= CHECK_CAPS_FLUSH; + + /* flush snaps first time around only */ + if (!list_empty(&ci->i_cap_snaps)) + __ceph_flush_snaps(ci, &session); + goto retry_locked; +retry: + spin_lock(&inode->i_lock); +retry_locked: + file_wanted = __ceph_caps_file_wanted(ci); + used = __ceph_caps_used(ci); + want = file_wanted | used; + + retain = want | CEPH_CAP_PIN; + if (!mdsc->stopping && inode->i_nlink > 0) { + if (want) { + retain |= CEPH_CAP_ANY; /* be greedy */ + } else { + retain |= CEPH_CAP_ANY_SHARED; + /* + * keep RD only if we didn't have the file open RW, + * because then the mds would revoke it anyway to + * journal max_size=0. + */ + if (ci->i_max_size == 0) + retain |= CEPH_CAP_ANY_RD; + } + } + + dout("check_caps %p file_want %s used %s dirty %s flushing %s" + " issued %s retain %s %s%s%s\n", inode, + ceph_cap_string(file_wanted), + ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps), + ceph_cap_string(ci->i_flushing_caps), + ceph_cap_string(__ceph_caps_issued(ci, NULL)), + ceph_cap_string(retain), + (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "", + (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "", + (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : ""); + + /* + * If we no longer need to hold onto old our caps, and we may + * have cached pages, but don't want them, then try to invalidate. + * If we fail, it's because pages are locked.... try again later. + */ + if ((!is_delayed || mdsc->stopping) && + ci->i_wrbuffer_ref == 0 && /* no dirty pages... 
*/ + ci->i_rdcache_gen && /* may have cached pages */ + file_wanted == 0 && /* no open files */ + !ci->i_truncate_pending && + !tried_invalidate) { + u32 invalidating_gen = ci->i_rdcache_gen; + int ret; + + dout("check_caps trying to invalidate on %p\n", inode); + spin_unlock(&inode->i_lock); + ret = invalidate_inode_pages2(&inode->i_data); + spin_lock(&inode->i_lock); + if (ret == 0 && invalidating_gen == ci->i_rdcache_gen) { + /* success. */ + ci->i_rdcache_gen = 0; + ci->i_rdcache_revoking = 0; + } else { + dout("check_caps failed to invalidate pages\n"); + /* we failed to invalidate pages. check these + caps again later. */ + force_requeue = 1; + __cap_set_timeouts(mdsc, ci); + } + tried_invalidate = 1; + goto retry_locked; + } + + num = 0; + for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { + cap = rb_entry(p, struct ceph_cap, ci_node); + num++; + + /* avoid looping forever */ + if (mds >= cap->mds || + ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap)) + continue; + + /* NOTE: no side-effects allowed, until we take s_mutex */ + + revoking = cap->implemented & ~cap->issued; + if (revoking) + dout("mds%d revoking %s\n", cap->mds, + ceph_cap_string(revoking)); + + if (cap == ci->i_auth_cap && + (cap->issued & CEPH_CAP_FILE_WR)) { + /* request larger max_size from MDS? */ + if (ci->i_wanted_max_size > ci->i_max_size && + ci->i_wanted_max_size > ci->i_requested_max_size) { + dout("requesting new max_size\n"); + goto ack; + } + + /* approaching file_max? */ + if ((inode->i_size << 1) >= ci->i_max_size && + (ci->i_reported_size << 1) < ci->i_max_size) { + dout("i_size approaching max_size\n"); + goto ack; + } + } + /* flush anything dirty? */ + if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) && + ci->i_dirty_caps) { + dout("flushing dirty caps\n"); + goto ack; + } + + /* completed revocation? going down and there are no caps? */ + if (revoking && (revoking & used) == 0) { + dout("completed revocation of %s\n", + ceph_cap_string(cap->implemented & ~cap->issued)); + goto ack; + } + + /* want more caps from mds? */ + if (want & ~(cap->mds_wanted | cap->issued)) + goto ack; + + /* things we might delay */ + if ((cap->issued & ~retain) == 0 && + cap->mds_wanted == want) + continue; /* nope, all good */ + + if (is_delayed) + goto ack; + + /* delay? 
*/ + if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 && + time_before(jiffies, ci->i_hold_caps_max)) { + dout(" delaying issued %s -> %s, wanted %s -> %s\n", + ceph_cap_string(cap->issued), + ceph_cap_string(cap->issued & retain), + ceph_cap_string(cap->mds_wanted), + ceph_cap_string(want)); + delayed++; + continue; + } + +ack: + if (session && session != cap->session) { + dout("oops, wrong session %p mutex\n", session); + mutex_unlock(&session->s_mutex); + session = NULL; + } + if (!session) { + session = cap->session; + if (mutex_trylock(&session->s_mutex) == 0) { + dout("inverting session/ino locks on %p\n", + session); + spin_unlock(&inode->i_lock); + if (took_snap_rwsem) { + up_read(&mdsc->snap_rwsem); + took_snap_rwsem = 0; + } + mutex_lock(&session->s_mutex); + goto retry; + } + } + /* take snap_rwsem after session mutex */ + if (!took_snap_rwsem) { + if (down_read_trylock(&mdsc->snap_rwsem) == 0) { + dout("inverting snap/in locks on %p\n", + inode); + spin_unlock(&inode->i_lock); + down_read(&mdsc->snap_rwsem); + took_snap_rwsem = 1; + goto retry; + } + took_snap_rwsem = 1; + } + + if (cap == ci->i_auth_cap && ci->i_dirty_caps) { + /* update dirty, flushing bits */ + flushing = ci->i_dirty_caps; + dout(" flushing %s, flushing_caps %s -> %s\n", + ceph_cap_string(flushing), + ceph_cap_string(ci->i_flushing_caps), + ceph_cap_string(ci->i_flushing_caps | flushing)); + ci->i_flushing_caps |= flushing; + ci->i_dirty_caps = 0; + __mark_caps_flushing(inode, session); + } + + mds = cap->mds; /* remember mds, so we don't repeat */ + sent++; + + /* __send_cap drops i_lock */ + delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want, + retain, flushing, NULL); + goto retry; /* retake i_lock and restart our cap scan. */ + } + + /* + * Reschedule delayed caps release if we delayed anything, + * otherwise cancel. + */ + if (delayed && is_delayed) + force_requeue = 1; /* __send_cap delayed release; requeue */ + if (!delayed && !is_delayed) + __cap_delay_cancel(mdsc, ci); + else if (!is_delayed || force_requeue) + __cap_delay_requeue(mdsc, ci); + + spin_unlock(&inode->i_lock); + + if (session && drop_session_lock) + mutex_unlock(&session->s_mutex); + if (took_snap_rwsem) + up_read(&mdsc->snap_rwsem); +} + +/* + * Mark caps dirty. If inode is newly dirty, add to the global dirty + * list. + */ +int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) +{ + struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc; + struct inode *inode = &ci->vfs_inode; + int was = __ceph_caps_dirty(ci); + int dirty = 0; + + dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode, + ceph_cap_string(mask), ceph_cap_string(ci->i_dirty_caps), + ceph_cap_string(ci->i_dirty_caps | mask)); + ci->i_dirty_caps |= mask; + if (!was) { + dout(" inode %p now dirty\n", &ci->vfs_inode); + spin_lock(&mdsc->cap_dirty_lock); + list_add(&ci->i_dirty_item, &mdsc->cap_dirty); + spin_unlock(&mdsc->cap_dirty_lock); + igrab(inode); + dirty |= I_DIRTY_SYNC; + } + if ((was & CEPH_CAP_FILE_BUFFER) && + (mask & CEPH_CAP_FILE_BUFFER)) + dirty |= I_DIRTY_DATASYNC; + if (dirty) + __mark_inode_dirty(inode, dirty); + __cap_delay_requeue(mdsc, ci); + return was; +} + +/* + * Try to flush dirty caps back to the auth mds. + */ +static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session, + unsigned *flush_tid) +{ + struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc; + struct ceph_inode_info *ci = ceph_inode(inode); + int unlock_session = session ? 
0 : 1; + int flushing = 0; + +retry: + spin_lock(&inode->i_lock); + if (ci->i_dirty_caps && ci->i_auth_cap) { + struct ceph_cap *cap = ci->i_auth_cap; + int used = __ceph_caps_used(ci); + int want = __ceph_caps_wanted(ci); + int delayed; + + if (!session) { + spin_unlock(&inode->i_lock); + session = cap->session; + mutex_lock(&session->s_mutex); + goto retry; + } + BUG_ON(session != cap->session); + if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) + goto out; + + __mark_caps_flushing(inode, session); + + flushing = ci->i_dirty_caps; + dout(" flushing %s, flushing_caps %s -> %s\n", + ceph_cap_string(flushing), + ceph_cap_string(ci->i_flushing_caps), + ceph_cap_string(ci->i_flushing_caps | flushing)); + ci->i_flushing_caps |= flushing; + ci->i_dirty_caps = 0; + + /* __send_cap drops i_lock */ + delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want, + cap->issued | cap->implemented, flushing, + flush_tid); + if (!delayed) + goto out_unlocked; + + spin_lock(&inode->i_lock); + __cap_delay_requeue(mdsc, ci); + } +out: + spin_unlock(&inode->i_lock); +out_unlocked: + if (session && unlock_session) + mutex_unlock(&session->s_mutex); + return flushing; +} + +/* + * Return true if we've flushed caps through the given flush_tid. + */ +static int caps_are_flushed(struct inode *inode, unsigned tid) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + int dirty, i, ret = 1; + + spin_lock(&inode->i_lock); + dirty = __ceph_caps_dirty(ci); + for (i = 0; i < CEPH_CAP_BITS; i++) + if ((ci->i_flushing_caps & (1 << i)) && + ci->i_cap_flush_tid[i] <= tid) { + /* still flushing this bit */ + ret = 0; + break; + } + spin_unlock(&inode->i_lock); + return ret; +} + +/* + * Wait on any unsafe replies for the given inode. First wait on the + * newest request, and make that the upper bound. Then, if there are + * more requests, keep waiting on the oldest as long as it is still older + * than the original request. + */ +static void sync_write_wait(struct inode *inode) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + struct list_head *head = &ci->i_unsafe_writes; + struct ceph_osd_request *req; + u64 last_tid; + + spin_lock(&ci->i_unsafe_lock); + if (list_empty(head)) + goto out; + + /* set upper bound as _last_ entry in chain */ + req = list_entry(head->prev, struct ceph_osd_request, + r_unsafe_item); + last_tid = req->r_tid; + + do { + ceph_osdc_get_request(req); + spin_unlock(&ci->i_unsafe_lock); + dout("sync_write_wait on tid %llu (until %llu)\n", + req->r_tid, last_tid); + wait_for_completion(&req->r_safe_completion); + spin_lock(&ci->i_unsafe_lock); + ceph_osdc_put_request(req); + + /* + * from here on look at first entry in chain, since we + * only want to wait for anything older than last_tid + */ + if (list_empty(head)) + break; + req = list_entry(head->next, struct ceph_osd_request, + r_unsafe_item); + } while (req->r_tid < last_tid); +out: + spin_unlock(&ci->i_unsafe_lock); +} + +int ceph_fsync(struct file *file, struct dentry *dentry, int datasync) +{ + struct inode *inode = dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + unsigned flush_tid; + int ret; + int dirty; + + dout("fsync %p%s\n", inode, datasync ? 
" datasync" : ""); + sync_write_wait(inode); + + ret = filemap_write_and_wait(inode->i_mapping); + if (ret < 0) + return ret; + + dirty = try_flush_caps(inode, NULL, &flush_tid); + dout("fsync dirty caps are %s\n", ceph_cap_string(dirty)); + + /* + * only wait on non-file metadata writeback (the mds + * can recover size and mtime, so we don't need to + * wait for that) + */ + if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) { + dout("fsync waiting for flush_tid %u\n", flush_tid); + ret = wait_event_interruptible(ci->i_cap_wq, + caps_are_flushed(inode, flush_tid)); + } + + dout("fsync %p%s done\n", inode, datasync ? " datasync" : ""); + return ret; +} + +/* + * Flush any dirty caps back to the mds. If we aren't asked to wait, + * queue inode for flush but don't do so immediately, because we can + * get by with fewer MDS messages if we wait for data writeback to + * complete first. + */ +int ceph_write_inode(struct inode *inode, int wait) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + unsigned flush_tid; + int err = 0; + int dirty; + + dout("write_inode %p wait=%d\n", inode, wait); + if (wait) { + dirty = try_flush_caps(inode, NULL, &flush_tid); + if (dirty) + err = wait_event_interruptible(ci->i_cap_wq, + caps_are_flushed(inode, flush_tid)); + } else { + struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc; + + spin_lock(&inode->i_lock); + if (__ceph_caps_dirty(ci)) + __cap_delay_requeue_front(mdsc, ci); + spin_unlock(&inode->i_lock); + } + return err; +} + +/* + * After a recovering MDS goes active, we need to resend any caps + * we were flushing. + * + * Caller holds session->s_mutex. + */ +static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session) +{ + struct ceph_cap_snap *capsnap; + + dout("kick_flushing_capsnaps mds%d\n", session->s_mds); + list_for_each_entry(capsnap, &session->s_cap_snaps_flushing, + flushing_item) { + struct ceph_inode_info *ci = capsnap->ci; + struct inode *inode = &ci->vfs_inode; + struct ceph_cap *cap; + + spin_lock(&inode->i_lock); + cap = ci->i_auth_cap; + if (cap && cap->session == session) { + dout("kick_flushing_caps %p cap %p capsnap %p\n", inode, + cap, capsnap); + __ceph_flush_snaps(ci, &session); + } else { + pr_err("%p auth cap %p not mds%d ???\n", inode, + cap, session->s_mds); + spin_unlock(&inode->i_lock); + } + } +} + +void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session) +{ + struct ceph_inode_info *ci; + + kick_flushing_capsnaps(mdsc, session); + + dout("kick_flushing_caps mds%d\n", session->s_mds); + list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { + struct inode *inode = &ci->vfs_inode; + struct ceph_cap *cap; + int delayed = 0; + + spin_lock(&inode->i_lock); + cap = ci->i_auth_cap; + if (cap && cap->session == session) { + dout("kick_flushing_caps %p cap %p %s\n", inode, + cap, ceph_cap_string(ci->i_flushing_caps)); + delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, + __ceph_caps_used(ci), + __ceph_caps_wanted(ci), + cap->issued | cap->implemented, + ci->i_flushing_caps, NULL); + if (delayed) { + spin_lock(&inode->i_lock); + __cap_delay_requeue(mdsc, ci); + spin_unlock(&inode->i_lock); + } + } else { + pr_err("%p auth cap %p not mds%d ???\n", inode, + cap, session->s_mds); + spin_unlock(&inode->i_lock); + } + } +} + + +/* + * Take references to capabilities we hold, so that we don't release + * them to the MDS prematurely. + * + * Protected by i_lock. 
+ */ +static void __take_cap_refs(struct ceph_inode_info *ci, int got) +{ + if (got & CEPH_CAP_PIN) + ci->i_pin_ref++; + if (got & CEPH_CAP_FILE_RD) + ci->i_rd_ref++; + if (got & CEPH_CAP_FILE_CACHE) + ci->i_rdcache_ref++; + if (got & CEPH_CAP_FILE_WR) + ci->i_wr_ref++; + if (got & CEPH_CAP_FILE_BUFFER) { + if (ci->i_wrbuffer_ref == 0) + igrab(&ci->vfs_inode); + ci->i_wrbuffer_ref++; + dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n", + &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref); + } +} + +/* + * Try to grab cap references. Specify those refs we @want, and the + * minimal set we @need. Also include the larger offset we are writing + * to (when applicable), and check against max_size here as well. + * Note that caller is responsible for ensuring max_size increases are + * requested from the MDS. + */ +static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want, + int *got, loff_t endoff, int *check_max, int *err) +{ + struct inode *inode = &ci->vfs_inode; + int ret = 0; + int have, implemented; + + dout("get_cap_refs %p need %s want %s\n", inode, + ceph_cap_string(need), ceph_cap_string(want)); + spin_lock(&inode->i_lock); + + /* make sure we _have_ some caps! */ + if (!__ceph_is_any_caps(ci)) { + dout("get_cap_refs %p no real caps\n", inode); + *err = -EBADF; + ret = 1; + goto out; + } + + if (need & CEPH_CAP_FILE_WR) { + if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) { + dout("get_cap_refs %p endoff %llu > maxsize %llu\n", + inode, endoff, ci->i_max_size); + if (endoff > ci->i_wanted_max_size) { + *check_max = 1; + ret = 1; + } + goto out; + } + /* + * If a sync write is in progress, we must wait, so that we + * can get a final snapshot value for size+mtime. + */ + if (__ceph_have_pending_cap_snap(ci)) { + dout("get_cap_refs %p cap_snap_pending\n", inode); + goto out; + } + } + have = __ceph_caps_issued(ci, &implemented); + + /* + * disallow writes while a truncate is pending + */ + if (ci->i_truncate_pending) + have &= ~CEPH_CAP_FILE_WR; + + if ((have & need) == need) { + /* + * Look at (implemented & ~have & not) so that we keep waiting + * on transition from wanted -> needed caps. This is needed + * for WRBUFFER|WR -> WR to avoid a new WR sync write from + * going before a prior buffered writeback happens. + */ + int not = want & ~(have & need); + int revoking = implemented & ~have; + dout("get_cap_refs %p have %s but not %s (revoking %s)\n", + inode, ceph_cap_string(have), ceph_cap_string(not), + ceph_cap_string(revoking)); + if ((revoking & not) == 0) { + *got = need | (have & want); + __take_cap_refs(ci, *got); + ret = 1; + } + } else { + dout("get_cap_refs %p have %s needed %s\n", inode, + ceph_cap_string(have), ceph_cap_string(need)); + } +out: + spin_unlock(&inode->i_lock); + dout("get_cap_refs %p ret %d got %s\n", inode, + ret, ceph_cap_string(*got)); + return ret; +} + +/* + * Check the offset we are writing up to against our current + * max_size. If necessary, tell the MDS we want to write to + * a larger offset. + */ +static void check_max_size(struct inode *inode, loff_t endoff) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + int check = 0; + + /* do we need to explicitly request a larger max_size? 
*/ + spin_lock(&inode->i_lock); + if ((endoff >= ci->i_max_size || + endoff > (inode->i_size << 1)) && + endoff > ci->i_wanted_max_size) { + dout("write %p at large endoff %llu, req max_size\n", + inode, endoff); + ci->i_wanted_max_size = endoff; + check = 1; + } + spin_unlock(&inode->i_lock); + if (check) + ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); +} + +/* + * Wait for caps, and take cap references. If we can't get a WR cap + * due to a small max_size, make sure we check_max_size (and possibly + * ask the mds) so we don't get hung up indefinitely. + */ +int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got, + loff_t endoff) +{ + int check_max, ret, err; + +retry: + if (endoff > 0) + check_max_size(&ci->vfs_inode, endoff); + check_max = 0; + err = 0; + ret = wait_event_interruptible(ci->i_cap_wq, + try_get_cap_refs(ci, need, want, + got, endoff, + &check_max, &err)); + if (err) + ret = err; + if (check_max) + goto retry; + return ret; +} + +/* + * Take cap refs. Caller must already know we hold at least one ref + * on the caps in question or we don't know this is safe. + */ +void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps) +{ + spin_lock(&ci->vfs_inode.i_lock); + __take_cap_refs(ci, caps); + spin_unlock(&ci->vfs_inode.i_lock); +} + +/* + * Release cap refs. + * + * If we released the last ref on any given cap, call ceph_check_caps + * to release (or schedule a release). + * + * If we are releasing a WR cap (from a sync write), finalize any affected + * cap_snap, and wake up any waiters. + */ +void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) +{ + struct inode *inode = &ci->vfs_inode; + int last = 0, put = 0, flushsnaps = 0, wake = 0; + struct ceph_cap_snap *capsnap; + + spin_lock(&inode->i_lock); + if (had & CEPH_CAP_PIN) + --ci->i_pin_ref; + if (had & CEPH_CAP_FILE_RD) + if (--ci->i_rd_ref == 0) + last++; + if (had & CEPH_CAP_FILE_CACHE) + if (--ci->i_rdcache_ref == 0) + last++; + if (had & CEPH_CAP_FILE_BUFFER) { + if (--ci->i_wrbuffer_ref == 0) { + last++; + put++; + } + dout("put_cap_refs %p wrbuffer %d -> %d (?)\n", + inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref); + } + if (had & CEPH_CAP_FILE_WR) + if (--ci->i_wr_ref == 0) { + last++; + if (!list_empty(&ci->i_cap_snaps)) { + capsnap = list_first_entry(&ci->i_cap_snaps, + struct ceph_cap_snap, + ci_item); + if (capsnap->writing) { + capsnap->writing = 0; + flushsnaps = + __ceph_finish_cap_snap(ci, + capsnap); + wake = 1; + } + } + } + spin_unlock(&inode->i_lock); + + dout("put_cap_refs %p had %s %s\n", inode, ceph_cap_string(had), + last ? "last" : ""); + + if (last && !flushsnaps) + ceph_check_caps(ci, 0, NULL); + else if (flushsnaps) + ceph_flush_snaps(ci); + if (wake) + wake_up(&ci->i_cap_wq); + if (put) + iput(inode); +} + +/* + * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap + * context. Adjust per-snap dirty page accounting as appropriate. + * Once all dirty data for a cap_snap is flushed, flush snapped file + * metadata back to the MDS. If we dropped the last ref, call + * ceph_check_caps. 
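+ * (Dropping the last wrbuffer ref also puts the inode reference that
+ * was taken when the count first became non-zero.)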
+ */ +void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, + struct ceph_snap_context *snapc) +{ + struct inode *inode = &ci->vfs_inode; + int last = 0; + int last_snap = 0; + int found = 0; + struct ceph_cap_snap *capsnap = NULL; + + spin_lock(&inode->i_lock); + ci->i_wrbuffer_ref -= nr; + last = !ci->i_wrbuffer_ref; + + if (ci->i_head_snapc == snapc) { + ci->i_wrbuffer_ref_head -= nr; + if (!ci->i_wrbuffer_ref_head) { + ceph_put_snap_context(ci->i_head_snapc); + ci->i_head_snapc = NULL; + } + dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n", + inode, + ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr, + ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, + last ? " LAST" : ""); + } else { + list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { + if (capsnap->context == snapc) { + found = 1; + capsnap->dirty_pages -= nr; + last_snap = !capsnap->dirty_pages; + break; + } + } + BUG_ON(!found); + dout("put_wrbuffer_cap_refs on %p cap_snap %p " + " snap %lld %d/%d -> %d/%d %s%s\n", + inode, capsnap, capsnap->context->seq, + ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, + ci->i_wrbuffer_ref, capsnap->dirty_pages, + last ? " (wrbuffer last)" : "", + last_snap ? " (capsnap last)" : ""); + } + + spin_unlock(&inode->i_lock); + + if (last) { + ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); + iput(inode); + } else if (last_snap) { + ceph_flush_snaps(ci); + wake_up(&ci->i_cap_wq); + } +} + +/* + * Handle a cap GRANT message from the MDS. (Note that a GRANT may + * actually be a revocation if it specifies a smaller cap set.) + * + * caller holds s_mutex. + * return value: + * 0 - ok + * 1 - check_caps on auth cap only (writeback) + * 2 - check_caps (ack revoke) + */ +static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, + struct ceph_mds_session *session, + struct ceph_cap *cap, + struct ceph_buffer *xattr_buf) + __releases(inode->i_lock) + +{ + struct ceph_inode_info *ci = ceph_inode(inode); + int mds = session->s_mds; + int seq = le32_to_cpu(grant->seq); + int newcaps = le32_to_cpu(grant->caps); + int issued, implemented, used, wanted, dirty; + u64 size = le64_to_cpu(grant->size); + u64 max_size = le64_to_cpu(grant->max_size); + struct timespec mtime, atime, ctime; + int reply = 0; + int wake = 0; + int writeback = 0; + int revoked_rdcache = 0; + int invalidate_async = 0; + int tried_invalidate = 0; + int ret; + + dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n", + inode, cap, mds, seq, ceph_cap_string(newcaps)); + dout(" size %llu max_size %llu, i_size %llu\n", size, max_size, + inode->i_size); + + /* + * If CACHE is being revoked, and we have no dirty buffers, + * try to invalidate (once). (If there are dirty buffers, we + * will invalidate _after_ writeback.) + */ +restart: + if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) && + !ci->i_wrbuffer_ref && !tried_invalidate) { + dout("CACHE invalidation\n"); + spin_unlock(&inode->i_lock); + tried_invalidate = 1; + + ret = invalidate_inode_pages2(&inode->i_data); + spin_lock(&inode->i_lock); + if (ret < 0) { + /* there were locked pages.. invalidate later + in a separate thread. 
*/ + if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { + invalidate_async = 1; + ci->i_rdcache_revoking = ci->i_rdcache_gen; + } + } else { + /* we successfully invalidated those pages */ + revoked_rdcache = 1; + ci->i_rdcache_gen = 0; + ci->i_rdcache_revoking = 0; + } + goto restart; + } + + /* side effects now are allowed */ + + issued = __ceph_caps_issued(ci, &implemented); + issued |= implemented | __ceph_caps_dirty(ci); + + cap->gen = session->s_cap_gen; + + __check_cap_issue(ci, cap, newcaps); + + if ((issued & CEPH_CAP_AUTH_EXCL) == 0) { + inode->i_mode = le32_to_cpu(grant->mode); + inode->i_uid = le32_to_cpu(grant->uid); + inode->i_gid = le32_to_cpu(grant->gid); + dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode, + inode->i_uid, inode->i_gid); + } + + if ((issued & CEPH_CAP_LINK_EXCL) == 0) + inode->i_nlink = le32_to_cpu(grant->nlink); + + if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) { + int len = le32_to_cpu(grant->xattr_len); + u64 version = le64_to_cpu(grant->xattr_version); + + if (version > ci->i_xattrs.version) { + dout(" got new xattrs v%llu on %p len %d\n", + version, inode, len); + if (ci->i_xattrs.blob) + ceph_buffer_put(ci->i_xattrs.blob); + ci->i_xattrs.blob = ceph_buffer_get(xattr_buf); + ci->i_xattrs.version = version; + } + } + + /* size/ctime/mtime/atime? */ + ceph_fill_file_size(inode, issued, + le32_to_cpu(grant->truncate_seq), + le64_to_cpu(grant->truncate_size), size); + ceph_decode_timespec(&mtime, &grant->mtime); + ceph_decode_timespec(&atime, &grant->atime); + ceph_decode_timespec(&ctime, &grant->ctime); + ceph_fill_file_time(inode, issued, + le32_to_cpu(grant->time_warp_seq), &ctime, &mtime, + &atime); + + /* max size increase? */ + if (max_size != ci->i_max_size) { + dout("max_size %lld -> %llu\n", ci->i_max_size, max_size); + ci->i_max_size = max_size; + if (max_size >= ci->i_wanted_max_size) { + ci->i_wanted_max_size = 0; /* reset */ + ci->i_requested_max_size = 0; + } + wake = 1; + } + + /* check cap bits */ + wanted = __ceph_caps_wanted(ci); + used = __ceph_caps_used(ci); + dirty = __ceph_caps_dirty(ci); + dout(" my wanted = %s, used = %s, dirty %s\n", + ceph_cap_string(wanted), + ceph_cap_string(used), + ceph_cap_string(dirty)); + if (wanted != le32_to_cpu(grant->wanted)) { + dout("mds wanted %s -> %s\n", + ceph_cap_string(le32_to_cpu(grant->wanted)), + ceph_cap_string(wanted)); + grant->wanted = cpu_to_le32(wanted); + } + + cap->seq = seq; + + /* file layout may have changed */ + ci->i_layout = grant->layout; + + /* revocation, grant, or no-op? */ + if (cap->issued & ~newcaps) { + dout("revocation: %s -> %s\n", ceph_cap_string(cap->issued), + ceph_cap_string(newcaps)); + if ((used & ~newcaps) & CEPH_CAP_FILE_BUFFER) + writeback = 1; /* will delay ack */ + else if (dirty & ~newcaps) + reply = 1; /* initiate writeback in check_caps */ + else if (((used & ~newcaps) & CEPH_CAP_FILE_CACHE) == 0 || + revoked_rdcache) + reply = 2; /* send revoke ack in check_caps */ + cap->issued = newcaps; + } else if (cap->issued == newcaps) { + dout("caps unchanged: %s -> %s\n", + ceph_cap_string(cap->issued), ceph_cap_string(newcaps)); + } else { + dout("grant: %s -> %s\n", ceph_cap_string(cap->issued), + ceph_cap_string(newcaps)); + cap->issued = newcaps; + cap->implemented |= newcaps; /* add bits only, to + * avoid stepping on a + * pending revocation */ + wake = 1; + } + + spin_unlock(&inode->i_lock); + if (writeback) { + /* + * queue inode for writeback: we can't actually call + * filemap_write_and_wait, etc. from message handler + * context. 
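+ * An extra inode reference is taken for each work item that is
+ * successfully queued, so the inode cannot go away before the
+ * deferred work gets a chance to run.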
+ */ + dout("queueing %p for writeback\n", inode); + if (ceph_queue_writeback(inode)) + igrab(inode); + } + if (invalidate_async) { + dout("queueing %p for page invalidation\n", inode); + if (ceph_queue_page_invalidation(inode)) + igrab(inode); + } + if (wake) + wake_up(&ci->i_cap_wq); + return reply; +} + +/* + * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the + * MDS has been safely committed. + */ +static void handle_cap_flush_ack(struct inode *inode, + struct ceph_mds_caps *m, + struct ceph_mds_session *session, + struct ceph_cap *cap) + __releases(inode->i_lock) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc; + unsigned seq = le32_to_cpu(m->seq); + int dirty = le32_to_cpu(m->dirty); + int cleaned = 0; + u64 flush_tid = le64_to_cpu(m->client_tid); + int old_dirty = 0, new_dirty = 0; + int i; + + for (i = 0; i < CEPH_CAP_BITS; i++) + if ((dirty & (1 << i)) && + flush_tid == ci->i_cap_flush_tid[i]) + cleaned |= 1 << i; + + dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s," + " flushing %s -> %s\n", + inode, session->s_mds, seq, ceph_cap_string(dirty), + ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps), + ceph_cap_string(ci->i_flushing_caps & ~cleaned)); + + if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned)) + goto out; + + old_dirty = ci->i_dirty_caps | ci->i_flushing_caps; + ci->i_flushing_caps &= ~cleaned; + new_dirty = ci->i_dirty_caps | ci->i_flushing_caps; + + spin_lock(&mdsc->cap_dirty_lock); + if (ci->i_flushing_caps == 0) { + list_del_init(&ci->i_flushing_item); + if (!list_empty(&session->s_cap_flushing)) + dout(" mds%d still flushing cap on %p\n", + session->s_mds, + &list_entry(session->s_cap_flushing.next, + struct ceph_inode_info, + i_flushing_item)->vfs_inode); + mdsc->num_cap_flushing--; + wake_up(&mdsc->cap_flushing_wq); + dout(" inode %p now !flushing\n", inode); + } + if (old_dirty && !new_dirty) { + dout(" inode %p now clean\n", inode); + list_del_init(&ci->i_dirty_item); + } + spin_unlock(&mdsc->cap_dirty_lock); + wake_up(&ci->i_cap_wq); + +out: + spin_unlock(&inode->i_lock); + if (old_dirty && !new_dirty) + iput(inode); +} + +/* + * Handle FLUSHSNAP_ACK. MDS has flushed snap data to disk and we can + * throw away our cap_snap. + * + * Caller hold s_mutex. 
+ */ +static void handle_cap_flushsnap_ack(struct inode *inode, + struct ceph_mds_caps *m, + struct ceph_mds_session *session) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + u64 follows = le64_to_cpu(m->snap_follows); + u64 flush_tid = le64_to_cpu(m->client_tid); + struct ceph_cap_snap *capsnap; + int drop = 0; + + dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n", + inode, ci, session->s_mds, follows); + + spin_lock(&inode->i_lock); + list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { + if (capsnap->follows == follows) { + if (capsnap->flush_tid != flush_tid) { + dout(" cap_snap %p follows %lld tid %lld !=" + " %lld\n", capsnap, follows, + flush_tid, capsnap->flush_tid); + break; + } + WARN_ON(capsnap->dirty_pages || capsnap->writing); + dout(" removing cap_snap %p follows %lld\n", + capsnap, follows); + ceph_put_snap_context(capsnap->context); + list_del(&capsnap->ci_item); + list_del(&capsnap->flushing_item); + ceph_put_cap_snap(capsnap); + drop = 1; + break; + } else { + dout(" skipping cap_snap %p follows %lld\n", + capsnap, capsnap->follows); + } + } + spin_unlock(&inode->i_lock); + if (drop) + iput(inode); +} + +/* + * Handle TRUNC from MDS, indicating file truncation. + * + * caller hold s_mutex. + */ +static void handle_cap_trunc(struct inode *inode, + struct ceph_mds_caps *trunc, + struct ceph_mds_session *session) + __releases(inode->i_lock) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + int mds = session->s_mds; + int seq = le32_to_cpu(trunc->seq); + u32 truncate_seq = le32_to_cpu(trunc->truncate_seq); + u64 truncate_size = le64_to_cpu(trunc->truncate_size); + u64 size = le64_to_cpu(trunc->size); + int implemented = 0; + int dirty = __ceph_caps_dirty(ci); + int issued = __ceph_caps_issued(ceph_inode(inode), &implemented); + int queue_trunc = 0; + + issued |= implemented | dirty; + + dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n", + inode, mds, seq, truncate_size, truncate_seq); + queue_trunc = ceph_fill_file_size(inode, issued, + truncate_seq, truncate_size, size); + spin_unlock(&inode->i_lock); + + if (queue_trunc) + if (queue_work(ceph_client(inode->i_sb)->trunc_wq, + &ci->i_vmtruncate_work)) + igrab(inode); +} + +/* + * Handle EXPORT from MDS. Cap is being migrated _from_ this mds to a + * different one. If we are the most recent migration we've seen (as + * indicated by mseq), make note of the migrating cap bits for the + * duration (until we see the corresponding IMPORT). 
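+ * The remembered i_cap_exporting_* state is cleared again when the
+ * corresponding IMPORT from the new MDS is processed (see
+ * handle_cap_import below).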
+ * + * caller holds s_mutex + */ +static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex, + struct ceph_mds_session *session) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + int mds = session->s_mds; + unsigned mseq = le32_to_cpu(ex->migrate_seq); + struct ceph_cap *cap = NULL, *t; + struct rb_node *p; + int remember = 1; + + dout("handle_cap_export inode %p ci %p mds%d mseq %d\n", + inode, ci, mds, mseq); + + spin_lock(&inode->i_lock); + + /* make sure we haven't seen a higher mseq */ + for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { + t = rb_entry(p, struct ceph_cap, ci_node); + if (ceph_seq_cmp(t->mseq, mseq) > 0) { + dout(" higher mseq on cap from mds%d\n", + t->session->s_mds); + remember = 0; + } + if (t->session->s_mds == mds) + cap = t; + } + + if (cap) { + if (remember) { + /* make note */ + ci->i_cap_exporting_mds = mds; + ci->i_cap_exporting_mseq = mseq; + ci->i_cap_exporting_issued = cap->issued; + } + __ceph_remove_cap(cap, NULL); + } else { + WARN_ON(!cap); + } + + spin_unlock(&inode->i_lock); +} + +/* + * Handle cap IMPORT. If there are temp bits from an older EXPORT, + * clean them up. + * + * caller holds s_mutex. + */ +static void handle_cap_import(struct ceph_mds_client *mdsc, + struct inode *inode, struct ceph_mds_caps *im, + struct ceph_mds_session *session, + void *snaptrace, int snaptrace_len) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + int mds = session->s_mds; + unsigned issued = le32_to_cpu(im->caps); + unsigned wanted = le32_to_cpu(im->wanted); + unsigned seq = le32_to_cpu(im->seq); + unsigned mseq = le32_to_cpu(im->migrate_seq); + u64 realmino = le64_to_cpu(im->realm); + u64 cap_id = le64_to_cpu(im->cap_id); + + if (ci->i_cap_exporting_mds >= 0 && + ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) { + dout("handle_cap_import inode %p ci %p mds%d mseq %d" + " - cleared exporting from mds%d\n", + inode, ci, mds, mseq, + ci->i_cap_exporting_mds); + ci->i_cap_exporting_issued = 0; + ci->i_cap_exporting_mseq = 0; + ci->i_cap_exporting_mds = -1; + } else { + dout("handle_cap_import inode %p ci %p mds%d mseq %d\n", + inode, ci, mds, mseq); + } + + down_write(&mdsc->snap_rwsem); + ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len, + false); + downgrade_write(&mdsc->snap_rwsem); + ceph_add_cap(inode, session, cap_id, -1, + issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH, + NULL /* no caps context */); + try_flush_caps(inode, session, NULL); + up_read(&mdsc->snap_rwsem); +} + +/* + * Handle a caps message from the MDS. + * + * Identify the appropriate session, inode, and call the right handler + * based on the cap op. 
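+ * FLUSHSNAP_ACK, EXPORT, and IMPORT are handled even if we do not
+ * currently hold a cap for the inode; the remaining ops require an
+ * existing cap.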
+ */ +void ceph_handle_caps(struct ceph_mds_session *session, + struct ceph_msg *msg) +{ + struct ceph_mds_client *mdsc = session->s_mdsc; + struct super_block *sb = mdsc->client->sb; + struct inode *inode; + struct ceph_cap *cap; + struct ceph_mds_caps *h; + int mds = le64_to_cpu(msg->hdr.src.name.num); + int op; + u32 seq; + struct ceph_vino vino; + u64 cap_id; + u64 size, max_size; + int check_caps = 0; + int r; + + dout("handle_caps from mds%d\n", mds); + + /* decode */ + if (msg->front.iov_len < sizeof(*h)) + goto bad; + h = msg->front.iov_base; + op = le32_to_cpu(h->op); + vino.ino = le64_to_cpu(h->ino); + vino.snap = CEPH_NOSNAP; + cap_id = le64_to_cpu(h->cap_id); + seq = le32_to_cpu(h->seq); + size = le64_to_cpu(h->size); + max_size = le64_to_cpu(h->max_size); + + mutex_lock(&session->s_mutex); + session->s_seq++; + dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq, + (unsigned)seq); + + /* lookup ino */ + inode = ceph_find_inode(sb, vino); + dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino, + vino.snap, inode); + if (!inode) { + dout(" i don't have ino %llx\n", vino.ino); + goto done; + } + + /* these will work even if we don't have a cap yet */ + switch (op) { + case CEPH_CAP_OP_FLUSHSNAP_ACK: + handle_cap_flushsnap_ack(inode, h, session); + goto done; + + case CEPH_CAP_OP_EXPORT: + handle_cap_export(inode, h, session); + goto done; + + case CEPH_CAP_OP_IMPORT: + handle_cap_import(mdsc, inode, h, session, + msg->middle, + le32_to_cpu(h->snap_trace_len)); + check_caps = 1; /* we may have sent a RELEASE to the old auth */ + goto done; + } + + /* the rest require a cap */ + spin_lock(&inode->i_lock); + cap = __get_cap_for_mds(ceph_inode(inode), mds); + if (!cap) { + dout("no cap on %p ino %llx.%llx from mds%d, releasing\n", + inode, ceph_ino(inode), ceph_snap(inode), mds); + spin_unlock(&inode->i_lock); + goto done; + } + + /* note that each of these drops i_lock for us */ + switch (op) { + case CEPH_CAP_OP_REVOKE: + case CEPH_CAP_OP_GRANT: + r = handle_cap_grant(inode, h, session, cap, msg->middle); + if (r == 1) + ceph_check_caps(ceph_inode(inode), + CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY, + session); + else if (r == 2) + ceph_check_caps(ceph_inode(inode), + CHECK_CAPS_NODELAY, + session); + break; + + case CEPH_CAP_OP_FLUSH_ACK: + handle_cap_flush_ack(inode, h, session, cap); + break; + + case CEPH_CAP_OP_TRUNC: + handle_cap_trunc(inode, h, session); + break; + + default: + spin_unlock(&inode->i_lock); + pr_err("ceph_handle_caps: unknown cap op %d %s\n", op, + ceph_cap_op_name(op)); + } + +done: + mutex_unlock(&session->s_mutex); + + if (check_caps) + ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY, NULL); + if (inode) + iput(inode); + return; + +bad: + pr_err("ceph_handle_caps: corrupt message\n"); + return; +} + +/* + * Delayed work handler to process end of delayed cap release LRU list. 
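+ * If @flushdirty is set, also ask check_caps to flush any dirty caps
+ * it finds (CHECK_CAPS_FLUSH).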
+ */ +void ceph_check_delayed_caps(struct ceph_mds_client *mdsc, int flushdirty) +{ + struct ceph_inode_info *ci; + int flags = CHECK_CAPS_NODELAY; + + if (flushdirty) + flags |= CHECK_CAPS_FLUSH; + + dout("check_delayed_caps\n"); + while (1) { + spin_lock(&mdsc->cap_delay_lock); + if (list_empty(&mdsc->cap_delay_list)) + break; + ci = list_first_entry(&mdsc->cap_delay_list, + struct ceph_inode_info, + i_cap_delay_list); + if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 && + time_before(jiffies, ci->i_hold_caps_max)) + break; + list_del_init(&ci->i_cap_delay_list); + spin_unlock(&mdsc->cap_delay_lock); + dout("check_delayed_caps on %p\n", &ci->vfs_inode); + ceph_check_caps(ci, flags, NULL); + } + spin_unlock(&mdsc->cap_delay_lock); +} + +/* + * Drop open file reference. If we were the last open file, + * we may need to release capabilities to the MDS (or schedule + * their delayed release). + */ +void ceph_put_fmode(struct ceph_inode_info *ci, int fmode) +{ + struct inode *inode = &ci->vfs_inode; + int last = 0; + + spin_lock(&inode->i_lock); + dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode, + ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1); + BUG_ON(ci->i_nr_by_mode[fmode] == 0); + if (--ci->i_nr_by_mode[fmode] == 0) + last++; + spin_unlock(&inode->i_lock); + + if (last && ci->i_vino.snap == CEPH_NOSNAP) + ceph_check_caps(ci, 0, NULL); +} + +/* + * Helpers for embedding cap and dentry lease releases into mds + * requests. + * + * @force is used by dentry_release (below) to force inclusion of a + * record for the directory inode, even when there aren't any caps to + * drop. + */ +int ceph_encode_inode_release(void **p, struct inode *inode, + int mds, int drop, int unless, int force) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_cap *cap; + struct ceph_mds_request_release *rel = *p; + int ret = 0; + + dout("encode_inode_release %p mds%d drop %s unless %s\n", inode, + mds, ceph_cap_string(drop), ceph_cap_string(unless)); + + spin_lock(&inode->i_lock); + cap = __get_cap_for_mds(ci, mds); + if (cap && __cap_is_valid(cap)) { + if (force || + ((cap->issued & drop) && + (cap->issued & unless) == 0)) { + if ((cap->issued & drop) && + (cap->issued & unless) == 0) { + dout("encode_inode_release %p cap %p %s -> " + "%s\n", inode, cap, + ceph_cap_string(cap->issued), + ceph_cap_string(cap->issued & ~drop)); + cap->issued &= ~drop; + cap->implemented &= ~drop; + if (ci->i_ceph_flags & CEPH_I_NODELAY) { + int wanted = __ceph_caps_wanted(ci); + dout(" wanted %s -> %s (act %s)\n", + ceph_cap_string(cap->mds_wanted), + ceph_cap_string(cap->mds_wanted & + ~wanted), + ceph_cap_string(wanted)); + cap->mds_wanted &= wanted; + } + } else { + dout("encode_inode_release %p cap %p %s" + " (force)\n", inode, cap, + ceph_cap_string(cap->issued)); + } + + rel->ino = cpu_to_le64(ceph_ino(inode)); + rel->cap_id = cpu_to_le64(cap->cap_id); + rel->seq = cpu_to_le32(cap->seq); + rel->issue_seq = cpu_to_le32(cap->issue_seq), + rel->mseq = cpu_to_le32(cap->mseq); + rel->caps = cpu_to_le32(cap->issued); + rel->wanted = cpu_to_le32(cap->mds_wanted); + rel->dname_len = 0; + rel->dname_seq = 0; + *p += sizeof(*rel); + ret = 1; + } else { + dout("encode_inode_release %p cap %p %s\n", + inode, cap, ceph_cap_string(cap->issued)); + } + } + spin_unlock(&inode->i_lock); + return ret; +} + +int ceph_encode_dentry_release(void **p, struct dentry *dentry, + int mds, int drop, int unless) +{ + struct inode *dir = dentry->d_parent->d_inode; + struct ceph_mds_request_release *rel = *p; + struct ceph_dentry_info 
*di = ceph_dentry(dentry); + int force = 0; + int ret; + + /* + * force an record for the directory caps if we have a dentry lease. + * this is racy (can't take i_lock and d_lock together), but it + * doesn't have to be perfect; the mds will revoke anything we don't + * release. + */ + spin_lock(&dentry->d_lock); + if (di->lease_session && di->lease_session->s_mds == mds) + force = 1; + spin_unlock(&dentry->d_lock); + + ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force); + + spin_lock(&dentry->d_lock); + if (ret && di->lease_session && di->lease_session->s_mds == mds) { + dout("encode_dentry_release %p mds%d seq %d\n", + dentry, mds, (int)di->lease_seq); + rel->dname_len = cpu_to_le32(dentry->d_name.len); + memcpy(*p, dentry->d_name.name, dentry->d_name.len); + *p += dentry->d_name.len; + rel->dname_seq = cpu_to_le32(di->lease_seq); + } + spin_unlock(&dentry->d_lock); + return ret; +} -- cgit v1.2.3 From 963b61eb041e8850807d95f8d7a4c6a454c45000 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:12 -0700 Subject: ceph: snapshot management Ceph snapshots rely on client cooperation in determining which operations apply to which snapshots, and appropriately flushing snapshotted data and metadata back to the OSD and MDS clusters. Because snapshots apply to subtrees of the file hierarchy and can be created at any time, there is a fair bit of bookkeeping required to make this work. Portions of the hierarchy that belong to the same set of snapshots are described by a single 'snap realm.' A 'snap context' describes the set of snapshots that exist for a given file or directory. Signed-off-by: Sage Weil --- fs/ceph/snap.c | 897 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 897 insertions(+) create mode 100644 fs/ceph/snap.c diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c new file mode 100644 index 000000000000..2e3cb40b7e48 --- /dev/null +++ b/fs/ceph/snap.c @@ -0,0 +1,897 @@ +#include "ceph_debug.h" + +#include +#include + +#include "super.h" +#include "decode.h" + +/* + * Snapshots in ceph are driven in large part by cooperation from the + * client. In contrast to local file systems or file servers that + * implement snapshots at a single point in the system, ceph's + * distributed access to storage requires clients to help decide + * whether a write logically occurs before or after a recently created + * snapshot. + * + * This provides a perfect instantanous client-wide snapshot. Between + * clients, however, snapshots may appear to be applied at slightly + * different points in time, depending on delays in delivering the + * snapshot notification. + * + * Snapshots are _not_ file system-wide. Instead, each snapshot + * applies to the subdirectory nested beneath some directory. This + * effectively divides the hierarchy into multiple "realms," where all + * of the files contained by each realm share the same set of + * snapshots. An individual realm's snap set contains snapshots + * explicitly created on that realm, as well as any snaps in its + * parent's snap set _after_ the point at which the parent became it's + * parent (due to, say, a rename). Similarly, snaps from prior parents + * during the time intervals during which they were the parent are included. + * + * The client is spared most of this detail, fortunately... it must only + * maintains a hierarchy of realms reflecting the current parent/child + * realm relationship, and for each realm has an explicit list of snaps + * inherited from prior parents. 
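+ * (For example, if a snapshot is taken on /a and /a/b is later
+ * renamed to /c/b, files under /c/b keep the snaps created while
+ * they lived under /a as "prior parent" snaps.)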
+ * + * A snap_realm struct is maintained for realms containing every inode + * with an open cap in the system. (The needed snap realm information is + * provided by the MDS whenever a cap is issued, i.e., on open.) A 'seq' + * version number is used to ensure that as realm parameters change (new + * snapshot, new parent, etc.) the client's realm hierarchy is updated. + * + * The realm hierarchy drives the generation of a 'snap context' for each + * realm, which simply lists the resulting set of snaps for the realm. This + * is attached to any writes sent to OSDs. + */ +/* + * Unfortunately error handling is a bit mixed here. If we get a snap + * update, but don't have enough memory to update our realm hierarchy, + * it's not clear what we can do about it (besides complaining to the + * console). + */ + + +/* + * increase ref count for the realm + * + * caller must hold snap_rwsem for write. + */ +void ceph_get_snap_realm(struct ceph_mds_client *mdsc, + struct ceph_snap_realm *realm) +{ + dout("get_realm %p %d -> %d\n", realm, + atomic_read(&realm->nref), atomic_read(&realm->nref)+1); + /* + * since we _only_ increment realm refs or empty the empty + * list with snap_rwsem held, adjusting the empty list here is + * safe. we do need to protect against concurrent empty list + * additions, however. + */ + if (atomic_read(&realm->nref) == 0) { + spin_lock(&mdsc->snap_empty_lock); + list_del_init(&realm->empty_item); + spin_unlock(&mdsc->snap_empty_lock); + } + + atomic_inc(&realm->nref); +} + +/* + * create and get the realm rooted at @ino and bump its ref count. + * + * caller must hold snap_rwsem for write. + */ +static struct ceph_snap_realm *ceph_create_snap_realm( + struct ceph_mds_client *mdsc, + u64 ino) +{ + struct ceph_snap_realm *realm; + + realm = kzalloc(sizeof(*realm), GFP_NOFS); + if (!realm) + return ERR_PTR(-ENOMEM); + + radix_tree_insert(&mdsc->snap_realms, ino, realm); + + atomic_set(&realm->nref, 0); /* tree does not take a ref */ + realm->ino = ino; + INIT_LIST_HEAD(&realm->children); + INIT_LIST_HEAD(&realm->child_item); + INIT_LIST_HEAD(&realm->empty_item); + INIT_LIST_HEAD(&realm->inodes_with_caps); + spin_lock_init(&realm->inodes_with_caps_lock); + dout("create_snap_realm %llx %p\n", realm->ino, realm); + return realm; +} + +/* + * find and get (if found) the realm rooted at @ino and bump its ref count. + * + * caller must hold snap_rwsem for write. 
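+ * (Note: this helper only performs the lookup; callers that need to
+ * keep the realm, such as adjust_snap_realm_parent and
+ * ceph_handle_snap, call ceph_get_snap_realm on the result
+ * themselves.)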
+ */ +struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc, + u64 ino) +{ + struct ceph_snap_realm *realm; + + realm = radix_tree_lookup(&mdsc->snap_realms, ino); + if (realm) + dout("lookup_snap_realm %llx %p\n", realm->ino, realm); + return realm; +} + +static void __put_snap_realm(struct ceph_mds_client *mdsc, + struct ceph_snap_realm *realm); + +/* + * called with snap_rwsem (write) + */ +static void __destroy_snap_realm(struct ceph_mds_client *mdsc, + struct ceph_snap_realm *realm) +{ + dout("__destroy_snap_realm %p %llx\n", realm, realm->ino); + + radix_tree_delete(&mdsc->snap_realms, realm->ino); + + if (realm->parent) { + list_del_init(&realm->child_item); + __put_snap_realm(mdsc, realm->parent); + } + + kfree(realm->prior_parent_snaps); + kfree(realm->snaps); + ceph_put_snap_context(realm->cached_context); + kfree(realm); +} + +/* + * caller holds snap_rwsem (write) + */ +static void __put_snap_realm(struct ceph_mds_client *mdsc, + struct ceph_snap_realm *realm) +{ + dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm, + atomic_read(&realm->nref), atomic_read(&realm->nref)-1); + if (atomic_dec_and_test(&realm->nref)) + __destroy_snap_realm(mdsc, realm); +} + +/* + * caller needn't hold any locks + */ +void ceph_put_snap_realm(struct ceph_mds_client *mdsc, + struct ceph_snap_realm *realm) +{ + dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm, + atomic_read(&realm->nref), atomic_read(&realm->nref)-1); + if (!atomic_dec_and_test(&realm->nref)) + return; + + if (down_write_trylock(&mdsc->snap_rwsem)) { + __destroy_snap_realm(mdsc, realm); + up_write(&mdsc->snap_rwsem); + } else { + spin_lock(&mdsc->snap_empty_lock); + list_add(&mdsc->snap_empty, &realm->empty_item); + spin_unlock(&mdsc->snap_empty_lock); + } +} + +/* + * Clean up any realms whose ref counts have dropped to zero. Note + * that this does not include realms who were created but not yet + * used. + * + * Called under snap_rwsem (write) + */ +static void __cleanup_empty_realms(struct ceph_mds_client *mdsc) +{ + struct ceph_snap_realm *realm; + + spin_lock(&mdsc->snap_empty_lock); + while (!list_empty(&mdsc->snap_empty)) { + realm = list_first_entry(&mdsc->snap_empty, + struct ceph_snap_realm, empty_item); + list_del(&realm->empty_item); + spin_unlock(&mdsc->snap_empty_lock); + __destroy_snap_realm(mdsc, realm); + spin_lock(&mdsc->snap_empty_lock); + } + spin_unlock(&mdsc->snap_empty_lock); +} + +void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc) +{ + down_write(&mdsc->snap_rwsem); + __cleanup_empty_realms(mdsc); + up_write(&mdsc->snap_rwsem); +} + +/* + * adjust the parent realm of a given @realm. adjust child list, and parent + * pointers, and ref counts appropriately. + * + * return true if parent was changed, 0 if unchanged, <0 on error. + * + * caller must hold snap_rwsem for write. 
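+ * A return of 1 tells the caller that the hierarchy changed and that
+ * cached snap contexts need to be rebuilt (see ceph_update_snap_trace).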
+ */ +static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc, + struct ceph_snap_realm *realm, + u64 parentino) +{ + struct ceph_snap_realm *parent; + + if (realm->parent_ino == parentino) + return 0; + + parent = ceph_lookup_snap_realm(mdsc, parentino); + if (IS_ERR(parent)) + return PTR_ERR(parent); + if (!parent) { + parent = ceph_create_snap_realm(mdsc, parentino); + if (IS_ERR(parent)) + return PTR_ERR(parent); + } + dout("adjust_snap_realm_parent %llx %p: %llx %p -> %llx %p\n", + realm->ino, realm, realm->parent_ino, realm->parent, + parentino, parent); + if (realm->parent) { + list_del_init(&realm->child_item); + ceph_put_snap_realm(mdsc, realm->parent); + } + realm->parent_ino = parentino; + realm->parent = parent; + ceph_get_snap_realm(mdsc, parent); + list_add(&realm->child_item, &parent->children); + return 1; +} + + +static int cmpu64_rev(const void *a, const void *b) +{ + if (*(u64 *)a < *(u64 *)b) + return 1; + if (*(u64 *)a > *(u64 *)b) + return -1; + return 0; +} + +/* + * build the snap context for a given realm. + */ +static int build_snap_context(struct ceph_snap_realm *realm) +{ + struct ceph_snap_realm *parent = realm->parent; + struct ceph_snap_context *snapc; + int err = 0; + int i; + int num = realm->num_prior_parent_snaps + realm->num_snaps; + + /* + * build parent context, if it hasn't been built. + * conservatively estimate that all parent snaps might be + * included by us. + */ + if (parent) { + if (!parent->cached_context) { + err = build_snap_context(parent); + if (err) + goto fail; + } + num += parent->cached_context->num_snaps; + } + + /* do i actually need to update? not if my context seq + matches realm seq, and my parents' does to. (this works + because we rebuild_snap_realms() works _downward_ in + hierarchy after each update.) */ + if (realm->cached_context && + realm->cached_context->seq <= realm->seq && + (!parent || + realm->cached_context->seq <= parent->cached_context->seq)) { + dout("build_snap_context %llx %p: %p seq %lld (%d snaps)" + " (unchanged)\n", + realm->ino, realm, realm->cached_context, + realm->cached_context->seq, + realm->cached_context->num_snaps); + return 0; + } + + /* alloc new snap context */ + err = -ENOMEM; + if (num > ULONG_MAX / sizeof(u64) - sizeof(*snapc)) + goto fail; + snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS); + if (!snapc) + goto fail; + atomic_set(&snapc->nref, 1); + + /* build (reverse sorted) snap vector */ + num = 0; + snapc->seq = realm->seq; + if (parent) { + /* include any of parent's snaps occuring _after_ my + parent became my parent */ + for (i = 0; i < parent->cached_context->num_snaps; i++) + if (parent->cached_context->snaps[i] >= + realm->parent_since) + snapc->snaps[num++] = + parent->cached_context->snaps[i]; + if (parent->cached_context->seq > snapc->seq) + snapc->seq = parent->cached_context->seq; + } + memcpy(snapc->snaps + num, realm->snaps, + sizeof(u64)*realm->num_snaps); + num += realm->num_snaps; + memcpy(snapc->snaps + num, realm->prior_parent_snaps, + sizeof(u64)*realm->num_prior_parent_snaps); + num += realm->num_prior_parent_snaps; + + sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL); + snapc->num_snaps = num; + dout("build_snap_context %llx %p: %p seq %lld (%d snaps)\n", + realm->ino, realm, snapc, snapc->seq, snapc->num_snaps); + + if (realm->cached_context) + ceph_put_snap_context(realm->cached_context); + realm->cached_context = snapc; + return 0; + +fail: + /* + * if we fail, clear old (incorrect) cached_context... 
hopefully + * we'll have better luck building it later + */ + if (realm->cached_context) { + ceph_put_snap_context(realm->cached_context); + realm->cached_context = NULL; + } + pr_err("build_snap_context %llx %p fail %d\n", realm->ino, + realm, err); + return err; +} + +/* + * rebuild snap context for the given realm and all of its children. + */ +static void rebuild_snap_realms(struct ceph_snap_realm *realm) +{ + struct ceph_snap_realm *child; + + dout("rebuild_snap_realms %llx %p\n", realm->ino, realm); + build_snap_context(realm); + + list_for_each_entry(child, &realm->children, child_item) + rebuild_snap_realms(child); +} + + +/* + * helper to allocate and decode an array of snapids. free prior + * instance, if any. + */ +static int dup_array(u64 **dst, __le64 *src, int num) +{ + int i; + + kfree(*dst); + if (num) { + *dst = kcalloc(num, sizeof(u64), GFP_NOFS); + if (!*dst) + return -ENOMEM; + for (i = 0; i < num; i++) + (*dst)[i] = get_unaligned_le64(src + i); + } else { + *dst = NULL; + } + return 0; +} + + +/* + * When a snapshot is applied, the size/mtime inode metadata is queued + * in a ceph_cap_snap (one for each snapshot) until writeback + * completes and the metadata can be flushed back to the MDS. + * + * However, if a (sync) write is currently in-progress when we apply + * the snapshot, we have to wait until the write succeeds or fails + * (and a final size/mtime is known). In this case the + * cap_snap->writing = 1, and is said to be "pending." When the write + * finishes, we __ceph_finish_cap_snap(). + * + * Caller must hold snap_rwsem for read (i.e., the realm topology won't + * change). + */ +void ceph_queue_cap_snap(struct ceph_inode_info *ci, + struct ceph_snap_context *snapc) +{ + struct inode *inode = &ci->vfs_inode; + struct ceph_cap_snap *capsnap; + int used; + + capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS); + if (!capsnap) { + pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode); + return; + } + + spin_lock(&inode->i_lock); + used = __ceph_caps_used(ci); + if (__ceph_have_pending_cap_snap(ci)) { + /* there is no point in queuing multiple "pending" cap_snaps, + as no new writes are allowed to start when pending, so any + writes in progress now were started before the previous + cap_snap. lucky us. */ + dout("queue_cap_snap %p snapc %p seq %llu used %d" + " already pending\n", inode, snapc, snapc->seq, used); + kfree(capsnap); + } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) { + igrab(inode); + + atomic_set(&capsnap->nref, 1); + capsnap->ci = ci; + INIT_LIST_HEAD(&capsnap->ci_item); + INIT_LIST_HEAD(&capsnap->flushing_item); + + capsnap->follows = snapc->seq - 1; + capsnap->context = ceph_get_snap_context(snapc); + capsnap->issued = __ceph_caps_issued(ci, NULL); + capsnap->dirty = __ceph_caps_dirty(ci); + + capsnap->mode = inode->i_mode; + capsnap->uid = inode->i_uid; + capsnap->gid = inode->i_gid; + + /* fixme? */ + capsnap->xattr_blob = NULL; + capsnap->xattr_len = 0; + + /* dirty page count moved from _head to this cap_snap; + all subsequent writes page dirties occur _after_ this + snapshot. */ + capsnap->dirty_pages = ci->i_wrbuffer_ref_head; + ci->i_wrbuffer_ref_head = 0; + ceph_put_snap_context(ci->i_head_snapc); + ci->i_head_snapc = NULL; + list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps); + + if (used & CEPH_CAP_FILE_WR) { + dout("queue_cap_snap %p cap_snap %p snapc %p" + " seq %llu used WR, now pending\n", inode, + capsnap, snapc, snapc->seq); + capsnap->writing = 1; + } else { + /* note mtime, size NOW. 
*/ + __ceph_finish_cap_snap(ci, capsnap); + } + } else { + dout("queue_cap_snap %p nothing dirty|writing\n", inode); + kfree(capsnap); + } + + spin_unlock(&inode->i_lock); +} + +/* + * Finalize the size, mtime for a cap_snap.. that is, settle on final values + * to be used for the snapshot, to be flushed back to the mds. + * + * If capsnap can now be flushed, add to snap_flush list, and return 1. + * + * Caller must hold i_lock. + */ +int __ceph_finish_cap_snap(struct ceph_inode_info *ci, + struct ceph_cap_snap *capsnap) +{ + struct inode *inode = &ci->vfs_inode; + struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc; + + BUG_ON(capsnap->writing); + capsnap->size = inode->i_size; + capsnap->mtime = inode->i_mtime; + capsnap->atime = inode->i_atime; + capsnap->ctime = inode->i_ctime; + capsnap->time_warp_seq = ci->i_time_warp_seq; + if (capsnap->dirty_pages) { + dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu " + "still has %d dirty pages\n", inode, capsnap, + capsnap->context, capsnap->context->seq, + capsnap->size, capsnap->dirty_pages); + return 0; + } + dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu clean\n", + inode, capsnap, capsnap->context, + capsnap->context->seq, capsnap->size); + + spin_lock(&mdsc->snap_flush_lock); + list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); + spin_unlock(&mdsc->snap_flush_lock); + return 1; /* caller may want to ceph_flush_snaps */ +} + + +/* + * Parse and apply a snapblob "snap trace" from the MDS. This specifies + * the snap realm parameters from a given realm and all of its ancestors, + * up to the root. + * + * Caller must hold snap_rwsem for write. + */ +int ceph_update_snap_trace(struct ceph_mds_client *mdsc, + void *p, void *e, bool deletion) +{ + struct ceph_mds_snap_realm *ri; /* encoded */ + __le64 *snaps; /* encoded */ + __le64 *prior_parent_snaps; /* encoded */ + struct ceph_snap_realm *realm; + int invalidate = 0; + int err = -ENOMEM; + + dout("update_snap_trace deletion=%d\n", deletion); +more: + ceph_decode_need(&p, e, sizeof(*ri), bad); + ri = p; + p += sizeof(*ri); + ceph_decode_need(&p, e, sizeof(u64)*(le32_to_cpu(ri->num_snaps) + + le32_to_cpu(ri->num_prior_parent_snaps)), bad); + snaps = p; + p += sizeof(u64) * le32_to_cpu(ri->num_snaps); + prior_parent_snaps = p; + p += sizeof(u64) * le32_to_cpu(ri->num_prior_parent_snaps); + + realm = ceph_lookup_snap_realm(mdsc, le64_to_cpu(ri->ino)); + if (IS_ERR(realm)) { + err = PTR_ERR(realm); + goto fail; + } + if (!realm) { + realm = ceph_create_snap_realm(mdsc, le64_to_cpu(ri->ino)); + if (IS_ERR(realm)) { + err = PTR_ERR(realm); + goto fail; + } + } + + if (le64_to_cpu(ri->seq) > realm->seq) { + dout("update_snap_trace updating %llx %p %lld -> %lld\n", + realm->ino, realm, realm->seq, le64_to_cpu(ri->seq)); + /* + * if the realm seq has changed, queue a cap_snap for every + * inode with open caps. we do this _before_ we update + * the realm info so that we prepare for writeback under the + * _previous_ snap context. + * + * ...unless it's a snap deletion! 
+ */ + if (!deletion) { + struct ceph_inode_info *ci; + struct inode *lastinode = NULL; + + spin_lock(&realm->inodes_with_caps_lock); + list_for_each_entry(ci, &realm->inodes_with_caps, + i_snap_realm_item) { + struct inode *inode = igrab(&ci->vfs_inode); + if (!inode) + continue; + spin_unlock(&realm->inodes_with_caps_lock); + if (lastinode) + iput(lastinode); + lastinode = inode; + ceph_queue_cap_snap(ci, realm->cached_context); + spin_lock(&realm->inodes_with_caps_lock); + } + spin_unlock(&realm->inodes_with_caps_lock); + if (lastinode) + iput(lastinode); + dout("update_snap_trace cap_snaps queued\n"); + } + + } else { + dout("update_snap_trace %llx %p seq %lld unchanged\n", + realm->ino, realm, realm->seq); + } + + /* ensure the parent is correct */ + err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent)); + if (err < 0) + goto fail; + invalidate += err; + + if (le64_to_cpu(ri->seq) > realm->seq) { + /* update realm parameters, snap lists */ + realm->seq = le64_to_cpu(ri->seq); + realm->created = le64_to_cpu(ri->created); + realm->parent_since = le64_to_cpu(ri->parent_since); + + realm->num_snaps = le32_to_cpu(ri->num_snaps); + err = dup_array(&realm->snaps, snaps, realm->num_snaps); + if (err < 0) + goto fail; + + realm->num_prior_parent_snaps = + le32_to_cpu(ri->num_prior_parent_snaps); + err = dup_array(&realm->prior_parent_snaps, prior_parent_snaps, + realm->num_prior_parent_snaps); + if (err < 0) + goto fail; + + invalidate = 1; + } else if (!realm->cached_context) { + invalidate = 1; + } + + dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino, + realm, invalidate, p, e); + + if (p < e) + goto more; + + /* invalidate when we reach the _end_ (root) of the trace */ + if (invalidate) + rebuild_snap_realms(realm); + + __cleanup_empty_realms(mdsc); + return 0; + +bad: + err = -EINVAL; +fail: + pr_err("update_snap_trace error %d\n", err); + return err; +} + + +/* + * Send any cap_snaps that are queued for flush. Try to carry + * s_mutex across multiple snap flushes to avoid locking overhead. + * + * Caller holds no locks. + */ +static void flush_snaps(struct ceph_mds_client *mdsc) +{ + struct ceph_inode_info *ci; + struct inode *inode; + struct ceph_mds_session *session = NULL; + + dout("flush_snaps\n"); + spin_lock(&mdsc->snap_flush_lock); + while (!list_empty(&mdsc->snap_flush_list)) { + ci = list_first_entry(&mdsc->snap_flush_list, + struct ceph_inode_info, i_snap_flush_item); + inode = &ci->vfs_inode; + igrab(inode); + spin_unlock(&mdsc->snap_flush_lock); + spin_lock(&inode->i_lock); + __ceph_flush_snaps(ci, &session); + spin_unlock(&inode->i_lock); + iput(inode); + spin_lock(&mdsc->snap_flush_lock); + } + spin_unlock(&mdsc->snap_flush_lock); + + if (session) { + mutex_unlock(&session->s_mutex); + ceph_put_mds_session(session); + } + dout("flush_snaps done\n"); +} + + +/* + * Handle a snap notification from the MDS. + * + * This can take two basic forms: the simplest is just a snap creation + * or deletion notification on an existing realm. This should update the + * realm and its children. + * + * The more difficult case is realm creation, due to snap creation at a + * new point in the file hierarchy, or due to a rename that moves a file or + * directory into another realm. 
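+ * In the SPLIT case the message carries a list of inode numbers and
+ * child realms that must be moved into the newly created child realm
+ * before the snap trace is applied.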
+ */ +void ceph_handle_snap(struct ceph_mds_client *mdsc, + struct ceph_msg *msg) +{ + struct super_block *sb = mdsc->client->sb; + struct ceph_mds_session *session; + int mds; + u64 split; + int op; + int trace_len; + struct ceph_snap_realm *realm = NULL; + void *p = msg->front.iov_base; + void *e = p + msg->front.iov_len; + struct ceph_mds_snap_head *h; + int num_split_inos, num_split_realms; + __le64 *split_inos = NULL, *split_realms = NULL; + int i; + int locked_rwsem = 0; + + if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS) + return; + mds = le64_to_cpu(msg->hdr.src.name.num); + + /* decode */ + if (msg->front.iov_len < sizeof(*h)) + goto bad; + h = p; + op = le32_to_cpu(h->op); + split = le64_to_cpu(h->split); /* non-zero if we are splitting an + * existing realm */ + num_split_inos = le32_to_cpu(h->num_split_inos); + num_split_realms = le32_to_cpu(h->num_split_realms); + trace_len = le32_to_cpu(h->trace_len); + p += sizeof(*h); + + dout("handle_snap from mds%d op %s split %llx tracelen %d\n", mds, + ceph_snap_op_name(op), split, trace_len); + + /* find session */ + mutex_lock(&mdsc->mutex); + session = __ceph_lookup_mds_session(mdsc, mds); + mutex_unlock(&mdsc->mutex); + if (!session) { + dout("WTF, got snap but no session for mds%d\n", mds); + return; + } + + mutex_lock(&session->s_mutex); + session->s_seq++; + mutex_unlock(&session->s_mutex); + + down_write(&mdsc->snap_rwsem); + locked_rwsem = 1; + + if (op == CEPH_SNAP_OP_SPLIT) { + struct ceph_mds_snap_realm *ri; + + /* + * A "split" breaks part of an existing realm off into + * a new realm. The MDS provides a list of inodes + * (with caps) and child realms that belong to the new + * child. + */ + split_inos = p; + p += sizeof(u64) * num_split_inos; + split_realms = p; + p += sizeof(u64) * num_split_realms; + ceph_decode_need(&p, e, sizeof(*ri), bad); + /* we will peek at realm info here, but will _not_ + * advance p, as the realm update will occur below in + * ceph_update_snap_trace. */ + ri = p; + + realm = ceph_lookup_snap_realm(mdsc, split); + if (IS_ERR(realm)) + goto out; + if (!realm) { + realm = ceph_create_snap_realm(mdsc, split); + if (IS_ERR(realm)) + goto out; + } + ceph_get_snap_realm(mdsc, realm); + + dout("splitting snap_realm %llx %p\n", realm->ino, realm); + for (i = 0; i < num_split_inos; i++) { + struct ceph_vino vino = { + .ino = le64_to_cpu(split_inos[i]), + .snap = CEPH_NOSNAP, + }; + struct inode *inode = ceph_find_inode(sb, vino); + struct ceph_inode_info *ci; + + if (!inode) + continue; + ci = ceph_inode(inode); + + spin_lock(&inode->i_lock); + if (!ci->i_snap_realm) + goto skip_inode; + /* + * If this inode belongs to a realm that was + * created after our new realm, we experienced + * a race (due to another split notifications + * arriving from a different MDS). So skip + * this inode. + */ + if (ci->i_snap_realm->created > + le64_to_cpu(ri->created)) { + dout(" leaving %p in newer realm %llx %p\n", + inode, ci->i_snap_realm->ino, + ci->i_snap_realm); + goto skip_inode; + } + dout(" will move %p to split realm %llx %p\n", + inode, realm->ino, realm); + /* + * Remove the inode from the realm's inode + * list, but don't add it to the new realm + * yet. We don't want the cap_snap to be + * queued (again) by ceph_update_snap_trace() + * below. Queue it _now_, under the old context. 
+ */ + list_del_init(&ci->i_snap_realm_item); + spin_unlock(&inode->i_lock); + + ceph_queue_cap_snap(ci, + ci->i_snap_realm->cached_context); + + iput(inode); + continue; + +skip_inode: + spin_unlock(&inode->i_lock); + iput(inode); + } + + /* we may have taken some of the old realm's children. */ + for (i = 0; i < num_split_realms; i++) { + struct ceph_snap_realm *child = + ceph_lookup_snap_realm(mdsc, + le64_to_cpu(split_realms[i])); + if (IS_ERR(child)) + continue; + if (!child) + continue; + adjust_snap_realm_parent(mdsc, child, realm->ino); + } + } + + /* + * update using the provided snap trace. if we are deleting a + * snap, we can avoid queueing cap_snaps. + */ + ceph_update_snap_trace(mdsc, p, e, + op == CEPH_SNAP_OP_DESTROY); + + if (op == CEPH_SNAP_OP_SPLIT) { + /* + * ok, _now_ add the inodes into the new realm. + */ + for (i = 0; i < num_split_inos; i++) { + struct ceph_vino vino = { + .ino = le64_to_cpu(split_inos[i]), + .snap = CEPH_NOSNAP, + }; + struct inode *inode = ceph_find_inode(sb, vino); + struct ceph_inode_info *ci; + + if (!inode) + continue; + ci = ceph_inode(inode); + spin_lock(&inode->i_lock); + if (!ci->i_snap_realm) + goto split_skip_inode; + ceph_put_snap_realm(mdsc, ci->i_snap_realm); + spin_lock(&realm->inodes_with_caps_lock); + list_add(&ci->i_snap_realm_item, + &realm->inodes_with_caps); + ci->i_snap_realm = realm; + spin_unlock(&realm->inodes_with_caps_lock); + ceph_get_snap_realm(mdsc, realm); +split_skip_inode: + spin_unlock(&inode->i_lock); + iput(inode); + } + + /* we took a reference when we created the realm, above */ + ceph_put_snap_realm(mdsc, realm); + } + + __cleanup_empty_realms(mdsc); + + up_write(&mdsc->snap_rwsem); + + flush_snaps(mdsc); + return; + +bad: + pr_err("corrupt snap message from mds%d\n", mds); +out: + if (locked_rwsem) + up_write(&mdsc->snap_rwsem); + return; +} + + + -- cgit v1.2.3 From 31b8006e1d79e127a776c9414e3e0b5f9508047e Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:13 -0700 Subject: ceph: messenger library A generic message passing library is used to communicate with all other components in the Ceph file system. The messenger library provides ordered, reliable delivery of messages between two nodes in the system. This implementation is based on TCP. Signed-off-by: Sage Weil --- fs/ceph/decode.h | 136 ++++ fs/ceph/messenger.c | 2019 +++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/messenger.h | 243 +++++++ 3 files changed, 2398 insertions(+) create mode 100644 fs/ceph/decode.h create mode 100644 fs/ceph/messenger.c create mode 100644 fs/ceph/messenger.h diff --git a/fs/ceph/decode.h b/fs/ceph/decode.h new file mode 100644 index 000000000000..fc2769df062d --- /dev/null +++ b/fs/ceph/decode.h @@ -0,0 +1,136 @@ +#ifndef __CEPH_DECODE_H +#define __CEPH_DECODE_H + +#include + +/* + * in all cases, + * void **p pointer to position pointer + * void *end pointer to end of buffer (last byte + 1) + */ + +/* + * bounds check input. 
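+ * ceph_decode_need(p, end, n, bad) jumps to the given 'bad' label if
+ * fewer than n bytes remain between *p and end.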
+ */ +#define ceph_decode_need(p, end, n, bad) \ + do { \ + if (unlikely(*(p) + (n) > (end))) \ + goto bad; \ + } while (0) + +#define ceph_decode_64(p, v) \ + do { \ + v = get_unaligned_le64(*(p)); \ + *(p) += sizeof(u64); \ + } while (0) +#define ceph_decode_32(p, v) \ + do { \ + v = get_unaligned_le32(*(p)); \ + *(p) += sizeof(u32); \ + } while (0) +#define ceph_decode_16(p, v) \ + do { \ + v = get_unaligned_le16(*(p)); \ + *(p) += sizeof(u16); \ + } while (0) +#define ceph_decode_8(p, v) \ + do { \ + v = *(u8 *)*(p); \ + (*p)++; \ + } while (0) + +#define ceph_decode_copy(p, pv, n) \ + do { \ + memcpy(pv, *(p), n); \ + *(p) += n; \ + } while (0) + +/* bounds check too */ +#define ceph_decode_64_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u64), bad); \ + ceph_decode_64(p, v); \ + } while (0) +#define ceph_decode_32_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u32), bad); \ + ceph_decode_32(p, v); \ + } while (0) +#define ceph_decode_16_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u16), bad); \ + ceph_decode_16(p, v); \ + } while (0) + +#define ceph_decode_copy_safe(p, end, pv, n, bad) \ + do { \ + ceph_decode_need(p, end, n, bad); \ + ceph_decode_copy(p, pv, n); \ + } while (0) + +/* + * struct ceph_timespec <-> struct timespec + */ +#define ceph_decode_timespec(ts, tv) \ + do { \ + (ts)->tv_sec = le32_to_cpu((tv)->tv_sec); \ + (ts)->tv_nsec = le32_to_cpu((tv)->tv_nsec); \ + } while (0) +#define ceph_encode_timespec(tv, ts) \ + do { \ + (tv)->tv_sec = cpu_to_le32((ts)->tv_sec); \ + (tv)->tv_nsec = cpu_to_le32((ts)->tv_nsec); \ + } while (0) + + +/* + * encoders + */ +#define ceph_encode_64(p, v) \ + do { \ + put_unaligned_le64(v, (__le64 *)*(p)); \ + *(p) += sizeof(u64); \ + } while (0) +#define ceph_encode_32(p, v) \ + do { \ + put_unaligned_le32(v, (__le32 *)*(p)); \ + *(p) += sizeof(u32); \ + } while (0) +#define ceph_encode_16(p, v) \ + do { \ + put_unaligned_le16(v), (__le16 *)*(p)); \ + *(p) += sizeof(u16); \ + } while (0) +#define ceph_encode_8(p, v) \ + do { \ + *(u8 *)*(p) = v; \ + (*(p))++; \ + } while (0) + +/* + * filepath, string encoders + */ +static inline void ceph_encode_filepath(void **p, void *end, + u64 ino, const char *path) +{ + u32 len = path ? strlen(path) : 0; + BUG_ON(*p + sizeof(ino) + sizeof(len) + len > end); + ceph_encode_64(p, ino); + ceph_encode_32(p, len); + if (len) + memcpy(*p, path, len); + *p += len; +} + +static inline void ceph_encode_string(void **p, void *end, + const char *s, u32 len) +{ + BUG_ON(*p + sizeof(len) + len > end); + ceph_encode_32(p, len); + if (len) + memcpy(*p, s, len); + *p += len; +} + + +#endif diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c new file mode 100644 index 000000000000..63f7f1359385 --- /dev/null +++ b/fs/ceph/messenger.c @@ -0,0 +1,2019 @@ +#include "ceph_debug.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "super.h" +#include "messenger.h" + +/* + * Ceph uses the messenger to exchange ceph_msg messages with other + * hosts in the system. The messenger provides ordered and reliable + * delivery. We tolerate TCP disconnects by reconnecting (with + * exponential backoff) in the case of a fault (disconnection, bad + * crc, protocol error). Acks allow sent messages to be discarded by + * the sender. 
+ */ + +/* static tag bytes (protocol control messages) */ +static char tag_msg = CEPH_MSGR_TAG_MSG; +static char tag_ack = CEPH_MSGR_TAG_ACK; +static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE; + + +static void queue_con(struct ceph_connection *con); +static void con_work(struct work_struct *); +static void ceph_fault(struct ceph_connection *con); + +const char *ceph_name_type_str(int t) +{ + switch (t) { + case CEPH_ENTITY_TYPE_MON: return "mon"; + case CEPH_ENTITY_TYPE_MDS: return "mds"; + case CEPH_ENTITY_TYPE_OSD: return "osd"; + case CEPH_ENTITY_TYPE_CLIENT: return "client"; + case CEPH_ENTITY_TYPE_ADMIN: return "admin"; + default: return "???"; + } +} + +/* + * nicely render a sockaddr as a string. + */ +#define MAX_ADDR_STR 20 +static char addr_str[MAX_ADDR_STR][40]; +static DEFINE_SPINLOCK(addr_str_lock); +static int last_addr_str; + +const char *pr_addr(const struct sockaddr_storage *ss) +{ + int i; + char *s; + struct sockaddr_in *in4 = (void *)ss; + unsigned char *quad = (void *)&in4->sin_addr.s_addr; + struct sockaddr_in6 *in6 = (void *)ss; + + spin_lock(&addr_str_lock); + i = last_addr_str++; + if (last_addr_str == MAX_ADDR_STR) + last_addr_str = 0; + spin_unlock(&addr_str_lock); + s = addr_str[i]; + + switch (ss->ss_family) { + case AF_INET: + sprintf(s, "%u.%u.%u.%u:%u", + (unsigned int)quad[0], + (unsigned int)quad[1], + (unsigned int)quad[2], + (unsigned int)quad[3], + (unsigned int)ntohs(in4->sin_port)); + break; + + case AF_INET6: + sprintf(s, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%u", + in6->sin6_addr.s6_addr16[0], + in6->sin6_addr.s6_addr16[1], + in6->sin6_addr.s6_addr16[2], + in6->sin6_addr.s6_addr16[3], + in6->sin6_addr.s6_addr16[4], + in6->sin6_addr.s6_addr16[5], + in6->sin6_addr.s6_addr16[6], + in6->sin6_addr.s6_addr16[7], + (unsigned int)ntohs(in6->sin6_port)); + break; + + default: + sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family); + } + + return s; +} + +/* + * work queue for all reading and writing to/from the socket. + */ +struct workqueue_struct *ceph_msgr_wq; + +int __init ceph_msgr_init(void) +{ + ceph_msgr_wq = create_workqueue("ceph-msgr"); + if (IS_ERR(ceph_msgr_wq)) { + int ret = PTR_ERR(ceph_msgr_wq); + pr_err("msgr_init failed to create workqueue: %d\n", ret); + ceph_msgr_wq = NULL; + return ret; + } + return 0; +} + +void ceph_msgr_exit(void) +{ + destroy_workqueue(ceph_msgr_wq); +} + +/* + * socket callback functions + */ + +/* data available on socket, or listen socket received a connect */ +static void ceph_data_ready(struct sock *sk, int count_unused) +{ + struct ceph_connection *con = + (struct ceph_connection *)sk->sk_user_data; + if (sk->sk_state != TCP_CLOSE_WAIT) { + dout("ceph_data_ready on %p state = %lu, queueing work\n", + con, con->state); + queue_con(con); + } +} + +/* socket has buffer space for writing */ +static void ceph_write_space(struct sock *sk) +{ + struct ceph_connection *con = + (struct ceph_connection *)sk->sk_user_data; + + /* only queue to workqueue if there is data we want to write. 
*/ + if (test_bit(WRITE_PENDING, &con->state)) { + dout("ceph_write_space %p queueing write work\n", con); + queue_con(con); + } else { + dout("ceph_write_space %p nothing to write\n", con); + } + + /* since we have our own write_space, clear the SOCK_NOSPACE flag */ + clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); +} + +/* socket's state has changed */ +static void ceph_state_change(struct sock *sk) +{ + struct ceph_connection *con = + (struct ceph_connection *)sk->sk_user_data; + + dout("ceph_state_change %p state = %lu sk_state = %u\n", + con, con->state, sk->sk_state); + + if (test_bit(CLOSED, &con->state)) + return; + + switch (sk->sk_state) { + case TCP_CLOSE: + dout("ceph_state_change TCP_CLOSE\n"); + case TCP_CLOSE_WAIT: + dout("ceph_state_change TCP_CLOSE_WAIT\n"); + if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) { + if (test_bit(CONNECTING, &con->state)) + con->error_msg = "connection failed"; + else + con->error_msg = "socket closed"; + queue_con(con); + } + break; + case TCP_ESTABLISHED: + dout("ceph_state_change TCP_ESTABLISHED\n"); + queue_con(con); + break; + } +} + +/* + * set up socket callbacks + */ +static void set_sock_callbacks(struct socket *sock, + struct ceph_connection *con) +{ + struct sock *sk = sock->sk; + sk->sk_user_data = (void *)con; + sk->sk_data_ready = ceph_data_ready; + sk->sk_write_space = ceph_write_space; + sk->sk_state_change = ceph_state_change; +} + + +/* + * socket helpers + */ + +/* + * initiate connection to a remote socket. + */ +static struct socket *ceph_tcp_connect(struct ceph_connection *con) +{ + struct sockaddr *paddr = (struct sockaddr *)&con->peer_addr.in_addr; + struct socket *sock; + int ret; + + BUG_ON(con->sock); + ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); + if (ret) + return ERR_PTR(ret); + con->sock = sock; + sock->sk->sk_allocation = GFP_NOFS; + + set_sock_callbacks(sock, con); + + dout("connect %s\n", pr_addr(&con->peer_addr.in_addr)); + + ret = sock->ops->connect(sock, paddr, sizeof(*paddr), O_NONBLOCK); + if (ret == -EINPROGRESS) { + dout("connect %s EINPROGRESS sk_state = %u\n", + pr_addr(&con->peer_addr.in_addr), + sock->sk->sk_state); + ret = 0; + } + if (ret < 0) { + pr_err("connect %s error %d\n", + pr_addr(&con->peer_addr.in_addr), ret); + sock_release(sock); + con->sock = NULL; + con->error_msg = "connect error"; + } + + if (ret < 0) + return ERR_PTR(ret); + return sock; +} + +static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len) +{ + struct kvec iov = {buf, len}; + struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; + + return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); +} + +/* + * write something. @more is true if caller will be sending more data + * shortly. + */ +static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, + size_t kvlen, size_t len, int more) +{ + struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; + + if (more) + msg.msg_flags |= MSG_MORE; + else + msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ + + return kernel_sendmsg(sock, &msg, iov, kvlen, len); +} + + +/* + * Shutdown/close the socket for the given connection. 
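+ *
+ * SOCK_CLOSED is held across the shutdown so that the ceph_state_change()
+ * callback triggered by our own close is not mistaken for a remote fault
+ * and does not requeue error work on the connection.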
+ */ +static int con_close_socket(struct ceph_connection *con) +{ + int rc; + + dout("con_close_socket on %p sock %p\n", con, con->sock); + if (!con->sock) + return 0; + set_bit(SOCK_CLOSED, &con->state); + rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR); + sock_release(con->sock); + con->sock = NULL; + clear_bit(SOCK_CLOSED, &con->state); + return rc; +} + +/* + * Reset a connection. Discard all incoming and outgoing messages + * and clear *_seq state. + */ +static void ceph_msg_remove(struct ceph_msg *msg) +{ + list_del_init(&msg->list_head); + ceph_msg_put(msg); +} +static void ceph_msg_remove_list(struct list_head *head) +{ + while (!list_empty(head)) { + struct ceph_msg *msg = list_first_entry(head, struct ceph_msg, + list_head); + ceph_msg_remove(msg); + } +} + +static void reset_connection(struct ceph_connection *con) +{ + /* reset connection, out_queue, msg_ and connect_seq */ + /* discard existing out_queue and msg_seq */ + mutex_lock(&con->out_mutex); + ceph_msg_remove_list(&con->out_queue); + ceph_msg_remove_list(&con->out_sent); + + con->connect_seq = 0; + con->out_seq = 0; + con->out_msg = NULL; + con->in_seq = 0; + mutex_unlock(&con->out_mutex); +} + +/* + * mark a peer down. drop any open connections. + */ +void ceph_con_close(struct ceph_connection *con) +{ + dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr)); + set_bit(CLOSED, &con->state); /* in case there's queued work */ + clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */ + reset_connection(con); + queue_con(con); +} + +/* + * clean up connection state + */ +void ceph_con_shutdown(struct ceph_connection *con) +{ + dout("con_shutdown %p\n", con); + reset_connection(con); + set_bit(DEAD, &con->state); + con_close_socket(con); /* silently ignore errors */ +} + +/* + * Reopen a closed connection, with a new peer address. + */ +void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr) +{ + dout("con_open %p %s\n", con, pr_addr(&addr->in_addr)); + set_bit(OPENING, &con->state); + clear_bit(CLOSED, &con->state); + memcpy(&con->peer_addr, addr, sizeof(*addr)); + queue_con(con); +} + +/* + * generic get/put + */ +struct ceph_connection *ceph_con_get(struct ceph_connection *con) +{ + dout("con_get %p nref = %d -> %d\n", con, + atomic_read(&con->nref), atomic_read(&con->nref) + 1); + if (atomic_inc_not_zero(&con->nref)) + return con; + return NULL; +} + +void ceph_con_put(struct ceph_connection *con) +{ + dout("con_put %p nref = %d -> %d\n", con, + atomic_read(&con->nref), atomic_read(&con->nref) - 1); + BUG_ON(atomic_read(&con->nref) == 0); + if (atomic_dec_and_test(&con->nref)) { + ceph_con_shutdown(con); + kfree(con); + } +} + +/* + * initialize a new connection. + */ +void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con) +{ + dout("con_init %p\n", con); + memset(con, 0, sizeof(*con)); + atomic_set(&con->nref, 1); + con->msgr = msgr; + mutex_init(&con->out_mutex); + INIT_LIST_HEAD(&con->out_queue); + INIT_LIST_HEAD(&con->out_sent); + INIT_DELAYED_WORK(&con->work, con_work); +} + + +/* + * We maintain a global counter to order connection attempts. Get + * a unique seq greater than @gt. + */ +static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt) +{ + u32 ret; + + spin_lock(&msgr->global_seq_lock); + if (msgr->global_seq < gt) + msgr->global_seq = gt; + ret = ++msgr->global_seq; + spin_unlock(&msgr->global_seq_lock); + return ret; +} + + +/* + * Prepare footer for currently outgoing message, and finish things + * off. 
Assumes out_kvec* are already valid.. we just add on to the end. + */ +static void prepare_write_message_footer(struct ceph_connection *con, int v) +{ + struct ceph_msg *m = con->out_msg; + + dout("prepare_write_message_footer %p\n", con); + con->out_kvec_is_msg = true; + con->out_kvec[v].iov_base = &m->footer; + con->out_kvec[v].iov_len = sizeof(m->footer); + con->out_kvec_bytes += sizeof(m->footer); + con->out_kvec_left++; + con->out_more = m->more_to_follow; + con->out_msg = NULL; /* we're done with this one */ +} + +/* + * Prepare headers for the next outgoing message. + */ +static void prepare_write_message(struct ceph_connection *con) +{ + struct ceph_msg *m; + int v = 0; + + con->out_kvec_bytes = 0; + con->out_kvec_is_msg = true; + + /* Sneak an ack in there first? If we can get it into the same + * TCP packet that's a good thing. */ + if (con->in_seq > con->in_seq_acked) { + con->in_seq_acked = con->in_seq; + con->out_kvec[v].iov_base = &tag_ack; + con->out_kvec[v++].iov_len = 1; + con->out_temp_ack = cpu_to_le64(con->in_seq_acked); + con->out_kvec[v].iov_base = &con->out_temp_ack; + con->out_kvec[v++].iov_len = sizeof(con->out_temp_ack); + con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack); + } + + /* move message to sending/sent list */ + m = list_first_entry(&con->out_queue, + struct ceph_msg, list_head); + list_move_tail(&m->list_head, &con->out_sent); + con->out_msg = m; /* we don't bother taking a reference here. */ + + m->hdr.seq = cpu_to_le64(++con->out_seq); + + dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n", + m, con->out_seq, le16_to_cpu(m->hdr.type), + le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len), + le32_to_cpu(m->hdr.data_len), + m->nr_pages); + BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len); + + /* tag + hdr + front + middle */ + con->out_kvec[v].iov_base = &tag_msg; + con->out_kvec[v++].iov_len = 1; + con->out_kvec[v].iov_base = &m->hdr; + con->out_kvec[v++].iov_len = sizeof(m->hdr); + con->out_kvec[v++] = m->front; + if (m->middle) + con->out_kvec[v++] = m->middle->vec; + con->out_kvec_left = v; + con->out_kvec_bytes += 1 + sizeof(m->hdr) + m->front.iov_len + + (m->middle ? m->middle->vec.iov_len : 0); + con->out_kvec_cur = con->out_kvec; + + /* fill in crc (except data pages), footer */ + con->out_msg->hdr.crc = + cpu_to_le32(crc32c(0, (void *)&m->hdr, + sizeof(m->hdr) - sizeof(m->hdr.crc))); + con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE; + con->out_msg->footer.front_crc = + cpu_to_le32(crc32c(0, m->front.iov_base, m->front.iov_len)); + if (m->middle) + con->out_msg->footer.middle_crc = + cpu_to_le32(crc32c(0, m->middle->vec.iov_base, + m->middle->vec.iov_len)); + else + con->out_msg->footer.middle_crc = 0; + con->out_msg->footer.data_crc = 0; + dout("prepare_write_message front_crc %u data_crc %u\n", + le32_to_cpu(con->out_msg->footer.front_crc), + le32_to_cpu(con->out_msg->footer.middle_crc)); + + /* is there a data payload? */ + if (le32_to_cpu(m->hdr.data_len) > 0) { + /* initialize page iterator */ + con->out_msg_pos.page = 0; + con->out_msg_pos.page_pos = + le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK; + con->out_msg_pos.data_pos = 0; + con->out_msg_pos.did_page_crc = 0; + con->out_more = 1; /* data + footer will follow */ + } else { + /* no, queue up footer too and be done */ + prepare_write_message_footer(con, v); + } + + set_bit(WRITE_PENDING, &con->state); +} + +/* + * Prepare an ack. 
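+ * On the wire this is just a tag byte followed by the sequence number of
+ * the last message we received:
+ *
+ *	[ CEPH_MSGR_TAG_ACK (1 byte) | in_seq_acked (__le64) ]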
+ */ +static void prepare_write_ack(struct ceph_connection *con) +{ + dout("prepare_write_ack %p %llu -> %llu\n", con, + con->in_seq_acked, con->in_seq); + con->in_seq_acked = con->in_seq; + + con->out_kvec[0].iov_base = &tag_ack; + con->out_kvec[0].iov_len = 1; + con->out_temp_ack = cpu_to_le64(con->in_seq_acked); + con->out_kvec[1].iov_base = &con->out_temp_ack; + con->out_kvec[1].iov_len = sizeof(con->out_temp_ack); + con->out_kvec_left = 2; + con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack); + con->out_kvec_cur = con->out_kvec; + con->out_more = 1; /* more will follow.. eventually.. */ + set_bit(WRITE_PENDING, &con->state); +} + +/* + * Prepare to write keepalive byte. + */ +static void prepare_write_keepalive(struct ceph_connection *con) +{ + dout("prepare_write_keepalive %p\n", con); + con->out_kvec[0].iov_base = &tag_keepalive; + con->out_kvec[0].iov_len = 1; + con->out_kvec_left = 1; + con->out_kvec_bytes = 1; + con->out_kvec_cur = con->out_kvec; + set_bit(WRITE_PENDING, &con->state); +} + +/* + * Connection negotiation. + */ + +/* + * We connected to a peer and are saying hello. + */ +static void prepare_write_connect(struct ceph_messenger *msgr, + struct ceph_connection *con) +{ + int len = strlen(CEPH_BANNER); + unsigned global_seq = get_global_seq(con->msgr, 0); + int proto; + + switch (con->peer_name.type) { + case CEPH_ENTITY_TYPE_MON: + proto = CEPH_MONC_PROTOCOL; + break; + case CEPH_ENTITY_TYPE_OSD: + proto = CEPH_OSDC_PROTOCOL; + break; + case CEPH_ENTITY_TYPE_MDS: + proto = CEPH_MDSC_PROTOCOL; + break; + default: + BUG(); + } + + dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con, + con->connect_seq, global_seq, proto); + con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT); + con->out_connect.connect_seq = cpu_to_le32(con->connect_seq); + con->out_connect.global_seq = cpu_to_le32(global_seq); + con->out_connect.protocol_version = cpu_to_le32(proto); + con->out_connect.flags = 0; + if (test_bit(LOSSYTX, &con->state)) + con->out_connect.flags = CEPH_MSG_CONNECT_LOSSY; + + con->out_kvec[0].iov_base = CEPH_BANNER; + con->out_kvec[0].iov_len = len; + con->out_kvec[1].iov_base = &msgr->inst.addr; + con->out_kvec[1].iov_len = sizeof(msgr->inst.addr); + con->out_kvec[2].iov_base = &con->out_connect; + con->out_kvec[2].iov_len = sizeof(con->out_connect); + con->out_kvec_left = 3; + con->out_kvec_bytes = len + sizeof(msgr->inst.addr) + + sizeof(con->out_connect); + con->out_kvec_cur = con->out_kvec; + con->out_more = 0; + set_bit(WRITE_PENDING, &con->state); +} + +static void prepare_write_connect_retry(struct ceph_messenger *msgr, + struct ceph_connection *con) +{ + dout("prepare_write_connect_retry %p\n", con); + con->out_connect.connect_seq = cpu_to_le32(con->connect_seq); + con->out_connect.global_seq = + cpu_to_le32(get_global_seq(con->msgr, 0)); + + con->out_kvec[0].iov_base = &con->out_connect; + con->out_kvec[0].iov_len = sizeof(con->out_connect); + con->out_kvec_left = 1; + con->out_kvec_bytes = sizeof(con->out_connect); + con->out_kvec_cur = con->out_kvec; + con->out_more = 0; + set_bit(WRITE_PENDING, &con->state); +} + + +/* + * write as much of pending kvecs to the socket as we can. 
+ * 1 -> done + * 0 -> socket full, but more to do + * <0 -> error + */ +static int write_partial_kvec(struct ceph_connection *con) +{ + int ret; + + dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes); + while (con->out_kvec_bytes > 0) { + ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur, + con->out_kvec_left, con->out_kvec_bytes, + con->out_more); + if (ret <= 0) + goto out; + con->out_kvec_bytes -= ret; + if (con->out_kvec_bytes == 0) + break; /* done */ + while (ret > 0) { + if (ret >= con->out_kvec_cur->iov_len) { + ret -= con->out_kvec_cur->iov_len; + con->out_kvec_cur++; + con->out_kvec_left--; + } else { + con->out_kvec_cur->iov_len -= ret; + con->out_kvec_cur->iov_base += ret; + ret = 0; + break; + } + } + } + con->out_kvec_left = 0; + con->out_kvec_is_msg = false; + ret = 1; +out: + dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con, + con->out_kvec_bytes, con->out_kvec_left, ret); + return ret; /* done! */ +} + +/* + * Write as much message data payload as we can. If we finish, queue + * up the footer. + * 1 -> done, footer is now queued in out_kvec[]. + * 0 -> socket full, but more to do + * <0 -> error + */ +static int write_partial_msg_pages(struct ceph_connection *con) +{ + struct ceph_msg *msg = con->out_msg; + unsigned data_len = le32_to_cpu(msg->hdr.data_len); + size_t len; + int crc = con->msgr->nocrc; + int ret; + + dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n", + con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages, + con->out_msg_pos.page_pos); + + while (con->out_msg_pos.page < con->out_msg->nr_pages) { + struct page *page = NULL; + void *kaddr = NULL; + + /* + * if we are calculating the data crc (the default), we need + * to map the page. if our pages[] has been revoked, use the + * zero page. 
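+		 * (The zero page covers a request that was canceled or
+		 * revoked while its data was being written: we still have
+		 * to emit data_len bytes so the peer's stream framing stays
+		 * intact, so we substitute zeros.)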
+ */ + if (msg->pages) { + page = msg->pages[con->out_msg_pos.page]; + if (crc) + kaddr = kmap(page); + } else { + page = con->msgr->zero_page; + if (crc) + kaddr = page_address(con->msgr->zero_page); + } + len = min((int)(PAGE_SIZE - con->out_msg_pos.page_pos), + (int)(data_len - con->out_msg_pos.data_pos)); + if (crc && !con->out_msg_pos.did_page_crc) { + void *base = kaddr + con->out_msg_pos.page_pos; + u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc); + + BUG_ON(kaddr == NULL); + con->out_msg->footer.data_crc = + cpu_to_le32(crc32c(tmpcrc, base, len)); + con->out_msg_pos.did_page_crc = 1; + } + + ret = kernel_sendpage(con->sock, page, + con->out_msg_pos.page_pos, len, + MSG_DONTWAIT | MSG_NOSIGNAL | + MSG_MORE); + + if (crc && msg->pages) + kunmap(page); + + if (ret <= 0) + goto out; + + con->out_msg_pos.data_pos += ret; + con->out_msg_pos.page_pos += ret; + if (ret == len) { + con->out_msg_pos.page_pos = 0; + con->out_msg_pos.page++; + con->out_msg_pos.did_page_crc = 0; + } + } + + dout("write_partial_msg_pages %p msg %p done\n", con, msg); + + /* prepare and queue up footer, too */ + if (!crc) + con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC; + con->out_kvec_bytes = 0; + con->out_kvec_left = 0; + con->out_kvec_cur = con->out_kvec; + prepare_write_message_footer(con, 0); + ret = 1; +out: + return ret; +} + +/* + * write some zeros + */ +static int write_partial_skip(struct ceph_connection *con) +{ + int ret; + + while (con->out_skip > 0) { + struct kvec iov = { + .iov_base = page_address(con->msgr->zero_page), + .iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE) + }; + + ret = ceph_tcp_sendmsg(con->sock, &iov, 1, iov.iov_len, 1); + if (ret <= 0) + goto out; + con->out_skip -= ret; + } + ret = 1; +out: + return ret; +} + +/* + * Prepare to read connection handshake, or an ack. + */ +static void prepare_read_connect(struct ceph_connection *con) +{ + dout("prepare_read_connect %p\n", con); + con->in_base_pos = 0; +} + +static void prepare_read_ack(struct ceph_connection *con) +{ + dout("prepare_read_ack %p\n", con); + con->in_base_pos = 0; +} + +static void prepare_read_tag(struct ceph_connection *con) +{ + dout("prepare_read_tag %p\n", con); + con->in_base_pos = 0; + con->in_tag = CEPH_MSGR_TAG_READY; +} + +/* + * Prepare to read a message. 
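+ * Only the read position and crc state are reset here; the ceph_msg
+ * itself is allocated later, via con->ops->alloc_msg(), once the header
+ * has been read in read_partial_message().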
+ */ +static int prepare_read_message(struct ceph_connection *con) +{ + dout("prepare_read_message %p\n", con); + BUG_ON(con->in_msg != NULL); + con->in_base_pos = 0; + con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0; + return 0; +} + + +static int read_partial(struct ceph_connection *con, + int *to, int size, void *object) +{ + *to += size; + while (con->in_base_pos < *to) { + int left = *to - con->in_base_pos; + int have = size - left; + int ret = ceph_tcp_recvmsg(con->sock, object + have, left); + if (ret <= 0) + return ret; + con->in_base_pos += ret; + } + return 1; +} + + +/* + * Read all or part of the connect-side handshake on a new connection + */ +static int read_partial_connect(struct ceph_connection *con) +{ + int ret, to = 0; + + dout("read_partial_connect %p at %d\n", con, con->in_base_pos); + + /* peer's banner */ + ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner); + if (ret <= 0) + goto out; + ret = read_partial(con, &to, sizeof(con->actual_peer_addr), + &con->actual_peer_addr); + if (ret <= 0) + goto out; + ret = read_partial(con, &to, sizeof(con->peer_addr_for_me), + &con->peer_addr_for_me); + if (ret <= 0) + goto out; + ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply); + if (ret <= 0) + goto out; + + dout("read_partial_connect %p connect_seq = %u, global_seq = %u\n", + con, le32_to_cpu(con->in_reply.connect_seq), + le32_to_cpu(con->in_reply.global_seq)); +out: + return ret; +} + +/* + * Verify the hello banner looks okay. + */ +static int verify_hello(struct ceph_connection *con) +{ + if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) { + pr_err("connect to/from %s has bad banner\n", + pr_addr(&con->peer_addr.in_addr)); + con->error_msg = "protocol error, bad banner"; + return -1; + } + return 0; +} + +static bool addr_is_blank(struct sockaddr_storage *ss) +{ + switch (ss->ss_family) { + case AF_INET: + return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0; + case AF_INET6: + return + ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 && + ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 && + ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 && + ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0; + } + return false; +} + +static int addr_port(struct sockaddr_storage *ss) +{ + switch (ss->ss_family) { + case AF_INET: + return ((struct sockaddr_in *)ss)->sin_port; + case AF_INET6: + return ((struct sockaddr_in6 *)ss)->sin6_port; + } + return 0; +} + +static void addr_set_port(struct sockaddr_storage *ss, int p) +{ + switch (ss->ss_family) { + case AF_INET: + ((struct sockaddr_in *)ss)->sin_port = htons(p); + case AF_INET6: + ((struct sockaddr_in6 *)ss)->sin6_port = htons(p); + } +} + +/* + * Parse an ip[:port] list into an addr array. Use the default + * monitor port if a port isn't specified. + */ +int ceph_parse_ips(const char *c, const char *end, + struct ceph_entity_addr *addr, + int max_count, int *count) +{ + int i; + const char *p = c; + + dout("parse_ips on '%.*s'\n", (int)(end-c), c); + for (i = 0; i < max_count; i++) { + const char *ipend; + struct sockaddr_storage *ss = &addr[i].in_addr; + struct sockaddr_in *in4 = (void *)ss; + struct sockaddr_in6 *in6 = (void *)ss; + int port; + + memset(ss, 0, sizeof(*ss)); + if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr, + ',', &ipend)) { + ss->ss_family = AF_INET; + } else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr, + ',', &ipend)) { + ss->ss_family = AF_INET6; + } else { + goto bad; + } + p = ipend; + + /* port? 
*/ + if (p < end && *p == ':') { + port = 0; + p++; + while (p < end && *p >= '0' && *p <= '9') { + port = (port * 10) + (*p - '0'); + p++; + } + if (port > 65535 || port == 0) + goto bad; + } else { + port = CEPH_MON_PORT; + } + + addr_set_port(ss, port); + + dout("parse_ips got %s\n", pr_addr(ss)); + + if (p == end) + break; + if (*p != ',') + goto bad; + p++; + } + + if (p != end) + goto bad; + + if (count) + *count = i + 1; + return 0; + +bad: + pr_err("parse_ips bad ip '%s'\n", c); + return -EINVAL; +} + +static int process_connect(struct ceph_connection *con) +{ + dout("process_connect on %p tag %d\n", con, (int)con->in_tag); + + if (verify_hello(con) < 0) + return -1; + + /* + * Make sure the other end is who we wanted. note that the other + * end may not yet know their ip address, so if it's 0.0.0.0, give + * them the benefit of the doubt. + */ + if (!ceph_entity_addr_is_local(&con->peer_addr, + &con->actual_peer_addr) && + !(addr_is_blank(&con->actual_peer_addr.in_addr) && + con->actual_peer_addr.nonce == con->peer_addr.nonce)) { + pr_err("wrong peer, want %s/%d, " + "got %s/%d, wtf\n", + pr_addr(&con->peer_addr.in_addr), + con->peer_addr.nonce, + pr_addr(&con->actual_peer_addr.in_addr), + con->actual_peer_addr.nonce); + con->error_msg = "protocol error, wrong peer"; + return -1; + } + + /* + * did we learn our address? + */ + if (addr_is_blank(&con->msgr->inst.addr.in_addr)) { + int port = addr_port(&con->msgr->inst.addr.in_addr); + + memcpy(&con->msgr->inst.addr.in_addr, + &con->peer_addr_for_me.in_addr, + sizeof(con->peer_addr_for_me.in_addr)); + addr_set_port(&con->msgr->inst.addr.in_addr, port); + dout("process_connect learned my addr is %s\n", + pr_addr(&con->msgr->inst.addr.in_addr)); + } + + switch (con->in_reply.tag) { + case CEPH_MSGR_TAG_BADPROTOVER: + dout("process_connect got BADPROTOVER my %d != their %d\n", + le32_to_cpu(con->out_connect.protocol_version), + le32_to_cpu(con->in_reply.protocol_version)); + pr_err("%s%lld %s protocol version mismatch," + " my %d != server's %d\n", + ENTITY_NAME(con->peer_name), + pr_addr(&con->peer_addr.in_addr), + le32_to_cpu(con->out_connect.protocol_version), + le32_to_cpu(con->in_reply.protocol_version)); + con->error_msg = "protocol version mismatch"; + if (con->ops->bad_proto) + con->ops->bad_proto(con); + reset_connection(con); + set_bit(CLOSED, &con->state); /* in case there's queued work */ + return -1; + + + case CEPH_MSGR_TAG_RESETSESSION: + /* + * If we connected with a large connect_seq but the peer + * has no record of a session with us (no connection, or + * connect_seq == 0), they will send RESETSESION to indicate + * that they must have reset their session, and may have + * dropped messages. + */ + dout("process_connect got RESET peer seq %u\n", + le32_to_cpu(con->in_connect.connect_seq)); + pr_err("%s%lld %s connection reset\n", + ENTITY_NAME(con->peer_name), + pr_addr(&con->peer_addr.in_addr)); + reset_connection(con); + prepare_write_connect_retry(con->msgr, con); + prepare_read_connect(con); + + /* Tell ceph about it. */ + pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name)); + if (con->ops->peer_reset) + con->ops->peer_reset(con); + break; + + case CEPH_MSGR_TAG_RETRY_SESSION: + /* + * If we sent a smaller connect_seq than the peer has, try + * again with a larger value. 
+ */ + dout("process_connect got RETRY my seq = %u, peer_seq = %u\n", + le32_to_cpu(con->out_connect.connect_seq), + le32_to_cpu(con->in_connect.connect_seq)); + con->connect_seq = le32_to_cpu(con->in_connect.connect_seq); + prepare_write_connect_retry(con->msgr, con); + prepare_read_connect(con); + break; + + case CEPH_MSGR_TAG_RETRY_GLOBAL: + /* + * If we sent a smaller global_seq than the peer has, try + * again with a larger value. + */ + dout("process_connect got RETRY_GLOBAL my %u, peer_gseq = %u\n", + con->peer_global_seq, + le32_to_cpu(con->in_connect.global_seq)); + get_global_seq(con->msgr, + le32_to_cpu(con->in_connect.global_seq)); + prepare_write_connect_retry(con->msgr, con); + prepare_read_connect(con); + break; + + case CEPH_MSGR_TAG_READY: + clear_bit(CONNECTING, &con->state); + if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY) + set_bit(LOSSYRX, &con->state); + con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); + con->connect_seq++; + dout("process_connect got READY gseq %d cseq %d (%d)\n", + con->peer_global_seq, + le32_to_cpu(con->in_reply.connect_seq), + con->connect_seq); + WARN_ON(con->connect_seq != + le32_to_cpu(con->in_reply.connect_seq)); + + con->delay = 0; /* reset backoff memory */ + prepare_read_tag(con); + break; + + case CEPH_MSGR_TAG_WAIT: + /* + * If there is a connection race (we are opening + * connections to each other), one of us may just have + * to WAIT. This shouldn't happen if we are the + * client. + */ + pr_err("process_connect peer connecting WAIT\n"); + + default: + pr_err("connect protocol error, will retry\n"); + con->error_msg = "protocol error, garbage tag during connect"; + return -1; + } + return 0; +} + + +/* + * read (part of) an ack + */ +static int read_partial_ack(struct ceph_connection *con) +{ + int to = 0; + + return read_partial(con, &to, sizeof(con->in_temp_ack), + &con->in_temp_ack); +} + + +/* + * We can finally discard anything that's been acked. + */ +static void process_ack(struct ceph_connection *con) +{ + struct ceph_msg *m; + u64 ack = le64_to_cpu(con->in_temp_ack); + u64 seq; + + mutex_lock(&con->out_mutex); + while (!list_empty(&con->out_sent)) { + m = list_first_entry(&con->out_sent, struct ceph_msg, + list_head); + seq = le64_to_cpu(m->hdr.seq); + if (seq > ack) + break; + dout("got ack for seq %llu type %d at %p\n", seq, + le16_to_cpu(m->hdr.type), m); + ceph_msg_remove(m); + } + mutex_unlock(&con->out_mutex); + prepare_read_tag(con); +} + + + + + + +/* + * read (part of) a message. 
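+ * The on-wire layout being parsed here is, roughly:
+ *
+ *	[ ceph_msg_header | front | middle | data pages | ceph_msg_footer ]
+ *
+ * The header carries its own crc; the footer carries crc32c checksums
+ * for the front, middle, and data sections.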
+ */ +static int read_partial_message(struct ceph_connection *con) +{ + struct ceph_msg *m = con->in_msg; + void *p; + int ret; + int to, want, left; + unsigned front_len, middle_len, data_len, data_off; + int datacrc = con->msgr->nocrc; + + dout("read_partial_message con %p msg %p\n", con, m); + + /* header */ + while (con->in_base_pos < sizeof(con->in_hdr)) { + left = sizeof(con->in_hdr) - con->in_base_pos; + ret = ceph_tcp_recvmsg(con->sock, + (char *)&con->in_hdr + con->in_base_pos, + left); + if (ret <= 0) + return ret; + con->in_base_pos += ret; + if (con->in_base_pos == sizeof(con->in_hdr)) { + u32 crc = crc32c(0, (void *)&con->in_hdr, + sizeof(con->in_hdr) - sizeof(con->in_hdr.crc)); + if (crc != le32_to_cpu(con->in_hdr.crc)) { + pr_err("read_partial_message bad hdr " + " crc %u != expected %u\n", + crc, con->in_hdr.crc); + return -EBADMSG; + } + } + } + + front_len = le32_to_cpu(con->in_hdr.front_len); + if (front_len > CEPH_MSG_MAX_FRONT_LEN) + return -EIO; + middle_len = le32_to_cpu(con->in_hdr.middle_len); + if (middle_len > CEPH_MSG_MAX_DATA_LEN) + return -EIO; + data_len = le32_to_cpu(con->in_hdr.data_len); + if (data_len > CEPH_MSG_MAX_DATA_LEN) + return -EIO; + + /* allocate message? */ + if (!con->in_msg) { + dout("got hdr type %d front %d data %d\n", con->in_hdr.type, + con->in_hdr.front_len, con->in_hdr.data_len); + con->in_msg = con->ops->alloc_msg(con, &con->in_hdr); + if (!con->in_msg) { + /* skip this message */ + dout("alloc_msg returned NULL, skipping message\n"); + con->in_base_pos = -front_len - middle_len - data_len - + sizeof(m->footer); + con->in_tag = CEPH_MSGR_TAG_READY; + return 0; + } + if (IS_ERR(con->in_msg)) { + ret = PTR_ERR(con->in_msg); + con->in_msg = NULL; + con->error_msg = "out of memory for incoming message"; + return ret; + } + m = con->in_msg; + m->front.iov_len = 0; /* haven't read it yet */ + memcpy(&m->hdr, &con->in_hdr, sizeof(con->in_hdr)); + } + + /* front */ + while (m->front.iov_len < front_len) { + BUG_ON(m->front.iov_base == NULL); + left = front_len - m->front.iov_len; + ret = ceph_tcp_recvmsg(con->sock, (char *)m->front.iov_base + + m->front.iov_len, left); + if (ret <= 0) + return ret; + m->front.iov_len += ret; + if (m->front.iov_len == front_len) + con->in_front_crc = crc32c(0, m->front.iov_base, + m->front.iov_len); + } + + /* middle */ + while (middle_len > 0 && (!m->middle || + m->middle->vec.iov_len < middle_len)) { + if (m->middle == NULL) { + ret = -EOPNOTSUPP; + if (con->ops->alloc_middle) + ret = con->ops->alloc_middle(con, m); + if (ret < 0) { + dout("alloc_middle failed, skipping payload\n"); + con->in_base_pos = -middle_len - data_len + - sizeof(m->footer); + ceph_msg_put(con->in_msg); + con->in_msg = NULL; + con->in_tag = CEPH_MSGR_TAG_READY; + return 0; + } + m->middle->vec.iov_len = 0; + } + left = middle_len - m->middle->vec.iov_len; + ret = ceph_tcp_recvmsg(con->sock, + (char *)m->middle->vec.iov_base + + m->middle->vec.iov_len, left); + if (ret <= 0) + return ret; + m->middle->vec.iov_len += ret; + if (m->middle->vec.iov_len == middle_len) + con->in_middle_crc = crc32c(0, m->middle->vec.iov_base, + m->middle->vec.iov_len); + } + + /* (page) data */ + data_off = le16_to_cpu(m->hdr.data_off); + if (data_len == 0) + goto no_data; + + if (m->nr_pages == 0) { + con->in_msg_pos.page = 0; + con->in_msg_pos.page_pos = data_off & ~PAGE_MASK; + con->in_msg_pos.data_pos = 0; + /* find pages for data payload */ + want = calc_pages_for(data_off & ~PAGE_MASK, data_len); + ret = -1; + if (con->ops->prepare_pages) + ret = 
con->ops->prepare_pages(con, m, want); + if (ret < 0) { + dout("%p prepare_pages failed, skipping payload\n", m); + con->in_base_pos = -data_len - sizeof(m->footer); + ceph_msg_put(con->in_msg); + con->in_msg = NULL; + con->in_tag = CEPH_MSGR_TAG_READY; + return 0; + } + BUG_ON(m->nr_pages < want); + } + while (con->in_msg_pos.data_pos < data_len) { + left = min((int)(data_len - con->in_msg_pos.data_pos), + (int)(PAGE_SIZE - con->in_msg_pos.page_pos)); + BUG_ON(m->pages == NULL); + p = kmap(m->pages[con->in_msg_pos.page]); + ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, + left); + if (ret > 0 && datacrc) + con->in_data_crc = + crc32c(con->in_data_crc, + p + con->in_msg_pos.page_pos, ret); + kunmap(m->pages[con->in_msg_pos.page]); + if (ret <= 0) + return ret; + con->in_msg_pos.data_pos += ret; + con->in_msg_pos.page_pos += ret; + if (con->in_msg_pos.page_pos == PAGE_SIZE) { + con->in_msg_pos.page_pos = 0; + con->in_msg_pos.page++; + } + } + +no_data: + /* footer */ + to = sizeof(m->hdr) + sizeof(m->footer); + while (con->in_base_pos < to) { + left = to - con->in_base_pos; + ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer + + (con->in_base_pos - sizeof(m->hdr)), + left); + if (ret <= 0) + return ret; + con->in_base_pos += ret; + } + dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n", + m, front_len, m->footer.front_crc, middle_len, + m->footer.middle_crc, data_len, m->footer.data_crc); + + /* crc ok? */ + if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) { + pr_err("read_partial_message %p front crc %u != exp. %u\n", + m, con->in_front_crc, m->footer.front_crc); + return -EBADMSG; + } + if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) { + pr_err("read_partial_message %p middle crc %u != exp %u\n", + m, con->in_middle_crc, m->footer.middle_crc); + return -EBADMSG; + } + if (datacrc && + (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 && + con->in_data_crc != le32_to_cpu(m->footer.data_crc)) { + pr_err("read_partial_message %p data crc %u != exp. %u\n", m, + con->in_data_crc, le32_to_cpu(m->footer.data_crc)); + return -EBADMSG; + } + + return 1; /* done! */ +} + +/* + * Process message. This happens in the worker thread. The callback should + * be careful not to do anything that waits on other incoming messages or it + * may deadlock. + */ +static void process_message(struct ceph_connection *con) +{ + struct ceph_msg *msg = con->in_msg; + + con->in_msg = NULL; + + /* if first message, set peer_name */ + if (con->peer_name.type == 0) + con->peer_name = msg->hdr.src.name; + + mutex_lock(&con->out_mutex); + con->in_seq++; + mutex_unlock(&con->out_mutex); + + dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n", + msg, le64_to_cpu(msg->hdr.seq), + ENTITY_NAME(msg->hdr.src.name), + le16_to_cpu(msg->hdr.type), + ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), + le32_to_cpu(msg->hdr.front_len), + le32_to_cpu(msg->hdr.data_len), + con->in_front_crc, con->in_middle_crc, con->in_data_crc); + con->ops->dispatch(con, msg); + prepare_read_tag(con); +} + + +/* + * Write something to the socket. Called in a worker thread when the + * socket appears to be writeable and we have something ready to send. 
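+ *
+ * Roughly in priority order: open the socket (queueing the connect
+ * handshake) if needed, flush any pending out_skip/out_kvec bytes, then
+ * any message data pages, and only then start on the next queued
+ * message, ack, or keepalive.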
+ */ +static int try_write(struct ceph_connection *con) +{ + struct ceph_messenger *msgr = con->msgr; + int ret = 1; + + dout("try_write start %p state %lu nref %d\n", con, con->state, + atomic_read(&con->nref)); + + mutex_lock(&con->out_mutex); +more: + dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); + + /* open the socket first? */ + if (con->sock == NULL) { + /* + * if we were STANDBY and are reconnecting _this_ + * connection, bump connect_seq now. Always bump + * global_seq. + */ + if (test_and_clear_bit(STANDBY, &con->state)) + con->connect_seq++; + + prepare_write_connect(msgr, con); + prepare_read_connect(con); + set_bit(CONNECTING, &con->state); + + con->in_tag = CEPH_MSGR_TAG_READY; + dout("try_write initiating connect on %p new state %lu\n", + con, con->state); + con->sock = ceph_tcp_connect(con); + if (IS_ERR(con->sock)) { + con->sock = NULL; + con->error_msg = "connect error"; + ret = -1; + goto out; + } + } + +more_kvec: + /* kvec data queued? */ + if (con->out_skip) { + ret = write_partial_skip(con); + if (ret <= 0) + goto done; + if (ret < 0) { + dout("try_write write_partial_skip err %d\n", ret); + goto done; + } + } + if (con->out_kvec_left) { + ret = write_partial_kvec(con); + if (ret <= 0) + goto done; + if (ret < 0) { + dout("try_write write_partial_kvec err %d\n", ret); + goto done; + } + } + + /* msg pages? */ + if (con->out_msg) { + ret = write_partial_msg_pages(con); + if (ret == 1) + goto more_kvec; /* we need to send the footer, too! */ + if (ret == 0) + goto done; + if (ret < 0) { + dout("try_write write_partial_msg_pages err %d\n", + ret); + goto done; + } + } + + if (!test_bit(CONNECTING, &con->state)) { + /* is anything else pending? */ + if (!list_empty(&con->out_queue)) { + prepare_write_message(con); + goto more; + } + if (con->in_seq > con->in_seq_acked) { + prepare_write_ack(con); + goto more; + } + if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) { + prepare_write_keepalive(con); + goto more; + } + } + + /* Nothing to do! */ + clear_bit(WRITE_PENDING, &con->state); + dout("try_write nothing else to write.\n"); +done: + ret = 0; +out: + mutex_unlock(&con->out_mutex); + dout("try_write done on %p\n", con); + return ret; +} + + + +/* + * Read what we can from the socket. + */ +static int try_read(struct ceph_connection *con) +{ + struct ceph_messenger *msgr; + int ret = -1; + + if (!con->sock) + return 0; + + if (test_bit(STANDBY, &con->state)) + return 0; + + dout("try_read start on %p\n", con); + msgr = con->msgr; + +more: + dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag, + con->in_base_pos); + if (test_bit(CONNECTING, &con->state)) { + dout("try_read connecting\n"); + ret = read_partial_connect(con); + if (ret <= 0) + goto done; + if (process_connect(con) < 0) { + ret = -1; + goto out; + } + goto more; + } + + if (con->in_base_pos < 0) { + /* + * skipping + discarding content. + * + * FIXME: there must be a better way to do this! + */ + static char buf[1024]; + int skip = min(1024, -con->in_base_pos); + dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); + ret = ceph_tcp_recvmsg(con->sock, buf, skip); + if (ret <= 0) + goto done; + con->in_base_pos += ret; + if (con->in_base_pos) + goto more; + } + if (con->in_tag == CEPH_MSGR_TAG_READY) { + /* + * what's next? 
+ */ + ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); + if (ret <= 0) + goto done; + dout("try_read got tag %d\n", (int)con->in_tag); + switch (con->in_tag) { + case CEPH_MSGR_TAG_MSG: + prepare_read_message(con); + break; + case CEPH_MSGR_TAG_ACK: + prepare_read_ack(con); + break; + case CEPH_MSGR_TAG_CLOSE: + set_bit(CLOSED, &con->state); /* fixme */ + goto done; + default: + goto bad_tag; + } + } + if (con->in_tag == CEPH_MSGR_TAG_MSG) { + ret = read_partial_message(con); + if (ret <= 0) { + switch (ret) { + case -EBADMSG: + con->error_msg = "bad crc"; + ret = -EIO; + goto out; + case -EIO: + con->error_msg = "io error"; + goto out; + default: + goto done; + } + } + if (con->in_tag == CEPH_MSGR_TAG_READY) + goto more; + process_message(con); + goto more; + } + if (con->in_tag == CEPH_MSGR_TAG_ACK) { + ret = read_partial_ack(con); + if (ret <= 0) + goto done; + process_ack(con); + goto more; + } + +done: + ret = 0; +out: + dout("try_read done on %p\n", con); + return ret; + +bad_tag: + pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag); + con->error_msg = "protocol error, garbage tag"; + ret = -1; + goto out; +} + + +/* + * Atomically queue work on a connection. Bump @con reference to + * avoid races with connection teardown. + * + * There is some trickery going on with QUEUED and BUSY because we + * only want a _single_ thread operating on each connection at any + * point in time, but we want to use all available CPUs. + * + * The worker thread only proceeds if it can atomically set BUSY. It + * clears QUEUED and does it's thing. When it thinks it's done, it + * clears BUSY, then rechecks QUEUED.. if it's set again, it loops + * (tries again to set BUSY). + * + * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we + * try to queue work. If that fails (work is already queued, or BUSY) + * we give up (work also already being done or is queued) but leave QUEUED + * set so that the worker thread will loop if necessary. + */ +static void queue_con(struct ceph_connection *con) +{ + if (test_bit(DEAD, &con->state)) { + dout("queue_con %p ignoring: DEAD\n", + con); + return; + } + + if (!con->ops->get(con)) { + dout("queue_con %p ref count 0\n", con); + return; + } + + set_bit(QUEUED, &con->state); + if (test_bit(BUSY, &con->state)) { + dout("queue_con %p - already BUSY\n", con); + con->ops->put(con); + } else if (!queue_work(ceph_msgr_wq, &con->work.work)) { + dout("queue_con %p - already queued\n", con); + con->ops->put(con); + } else { + dout("queue_con %p\n", con); + } +} + +/* + * Do some work on a connection. Drop a connection ref when we're done. + */ +static void con_work(struct work_struct *work) +{ + struct ceph_connection *con = container_of(work, struct ceph_connection, + work.work); + int backoff = 0; + +more: + if (test_and_set_bit(BUSY, &con->state) != 0) { + dout("con_work %p BUSY already set\n", con); + goto out; + } + dout("con_work %p start, clearing QUEUED\n", con); + clear_bit(QUEUED, &con->state); + + if (test_bit(CLOSED, &con->state)) { /* e.g. 
if we are replaced */ + dout("con_work CLOSED\n"); + con_close_socket(con); + goto done; + } + if (test_and_clear_bit(OPENING, &con->state)) { + /* reopen w/ new peer */ + dout("con_work OPENING\n"); + con_close_socket(con); + } + + if (test_and_clear_bit(SOCK_CLOSED, &con->state) || + try_read(con) < 0 || + try_write(con) < 0) { + backoff = 1; + ceph_fault(con); /* error/fault path */ + } + +done: + clear_bit(BUSY, &con->state); + dout("con->state=%lu\n", con->state); + if (test_bit(QUEUED, &con->state)) { + if (!backoff) { + dout("con_work %p QUEUED reset, looping\n", con); + goto more; + } + dout("con_work %p QUEUED reset, but just faulted\n", con); + clear_bit(QUEUED, &con->state); + } + dout("con_work %p done\n", con); + +out: + con->ops->put(con); +} + + +/* + * Generic error/fault handler. A retry mechanism is used with + * exponential backoff + */ +static void ceph_fault(struct ceph_connection *con) +{ + pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), + pr_addr(&con->peer_addr.in_addr), con->error_msg); + dout("fault %p state %lu to peer %s\n", + con, con->state, pr_addr(&con->peer_addr.in_addr)); + + if (test_bit(LOSSYTX, &con->state)) { + dout("fault on LOSSYTX channel\n"); + goto out; + } + + clear_bit(BUSY, &con->state); /* to avoid an improbable race */ + + con_close_socket(con); + con->in_msg = NULL; + + /* If there are no messages in the queue, place the connection + * in a STANDBY state (i.e., don't try to reconnect just yet). */ + mutex_lock(&con->out_mutex); + if (list_empty(&con->out_queue) && !con->out_keepalive_pending) { + dout("fault setting STANDBY\n"); + set_bit(STANDBY, &con->state); + mutex_unlock(&con->out_mutex); + goto out; + } + + /* Requeue anything that hasn't been acked, and retry after a + * delay. */ + list_splice_init(&con->out_sent, &con->out_queue); + mutex_unlock(&con->out_mutex); + + if (con->delay == 0) + con->delay = BASE_DELAY_INTERVAL; + else if (con->delay < MAX_DELAY_INTERVAL) + con->delay *= 2; + + /* explicitly schedule work to try to reconnect again later. */ + dout("fault queueing %p delay %lu\n", con, con->delay); + con->ops->get(con); + if (queue_delayed_work(ceph_msgr_wq, &con->work, + round_jiffies_relative(con->delay)) == 0) + con->ops->put(con); + +out: + if (con->ops->fault) + con->ops->fault(con); +} + + + +/* + * create a new messenger instance + */ +struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr) +{ + struct ceph_messenger *msgr; + + msgr = kzalloc(sizeof(*msgr), GFP_KERNEL); + if (msgr == NULL) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&msgr->global_seq_lock); + + /* the zero page is needed if a request is "canceled" while the message + * is being written over the socket */ + msgr->zero_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!msgr->zero_page) { + kfree(msgr); + return ERR_PTR(-ENOMEM); + } + kmap(msgr->zero_page); + + if (myaddr) + msgr->inst.addr = *myaddr; + + /* select a random nonce */ + get_random_bytes(&msgr->inst.addr.nonce, + sizeof(msgr->inst.addr.nonce)); + + dout("messenger_create %p\n", msgr); + return msgr; +} + +void ceph_messenger_destroy(struct ceph_messenger *msgr) +{ + dout("destroy %p\n", msgr); + kunmap(msgr->zero_page); + __free_page(msgr->zero_page); + kfree(msgr); + dout("destroyed messenger %p\n", msgr); +} + +/* + * Queue up an outgoing message on the given connection. 
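+ *
+ * A hypothetical caller (the actual ones arrive with the mon/osd/mds
+ * client code) might look like:
+ *
+ *	msg = ceph_msg_new(type, front_len, 0, 0, NULL);
+ *	... fill in msg->front ...
+ *	ceph_con_send(con, msg);
+ *
+ * The connection takes over the caller's reference: it is dropped once
+ * the peer acks the message, or when the connection is closed/reset.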
+ */ +void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) +{ + if (test_bit(CLOSED, &con->state)) { + dout("con_send %p closed, dropping %p\n", con, msg); + ceph_msg_put(msg); + return; + } + + /* set src+dst */ + msg->hdr.src = con->msgr->inst; + msg->hdr.orig_src = con->msgr->inst; + msg->hdr.dst_erank = con->peer_addr.erank; + + /* queue */ + mutex_lock(&con->out_mutex); + BUG_ON(!list_empty(&msg->list_head)); + list_add_tail(&msg->list_head, &con->out_queue); + dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg, + ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type), + ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), + le32_to_cpu(msg->hdr.front_len), + le32_to_cpu(msg->hdr.middle_len), + le32_to_cpu(msg->hdr.data_len)); + mutex_unlock(&con->out_mutex); + + /* if there wasn't anything waiting to send before, queue + * new work */ + if (test_and_set_bit(WRITE_PENDING, &con->state) == 0) + queue_con(con); +} + +/* + * Revoke a message that was previously queued for send + */ +void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg) +{ + mutex_lock(&con->out_mutex); + if (!list_empty(&msg->list_head)) { + dout("con_revoke %p msg %p\n", con, msg); + list_del_init(&msg->list_head); + ceph_msg_put(msg); + msg->hdr.seq = 0; + if (con->out_msg == msg) + con->out_msg = NULL; + if (con->out_kvec_is_msg) { + con->out_skip = con->out_kvec_bytes; + con->out_kvec_is_msg = false; + } + } else { + dout("con_revoke %p msg %p - not queued (sent?)\n", con, msg); + } + mutex_unlock(&con->out_mutex); +} + +/* + * Queue a keepalive byte to ensure the tcp connection is alive. + */ +void ceph_con_keepalive(struct ceph_connection *con) +{ + if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 && + test_and_set_bit(WRITE_PENDING, &con->state) == 0) + queue_con(con); +} + + +/* + * construct a new message with given type, size + * the new msg has a ref count of 1. + */ +struct ceph_msg *ceph_msg_new(int type, int front_len, + int page_len, int page_off, struct page **pages) +{ + struct ceph_msg *m; + + m = kmalloc(sizeof(*m), GFP_NOFS); + if (m == NULL) + goto out; + atomic_set(&m->nref, 1); + INIT_LIST_HEAD(&m->list_head); + + m->hdr.type = cpu_to_le16(type); + m->hdr.front_len = cpu_to_le32(front_len); + m->hdr.middle_len = 0; + m->hdr.data_len = cpu_to_le32(page_len); + m->hdr.data_off = cpu_to_le16(page_off); + m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT); + m->footer.front_crc = 0; + m->footer.middle_crc = 0; + m->footer.data_crc = 0; + m->front_max = front_len; + m->front_is_vmalloc = false; + m->more_to_follow = false; + m->pool = NULL; + + /* front */ + if (front_len) { + if (front_len > PAGE_CACHE_SIZE) { + m->front.iov_base = __vmalloc(front_len, GFP_NOFS, + PAGE_KERNEL); + m->front_is_vmalloc = true; + } else { + m->front.iov_base = kmalloc(front_len, GFP_NOFS); + } + if (m->front.iov_base == NULL) { + pr_err("msg_new can't allocate %d bytes\n", + front_len); + goto out2; + } + } else { + m->front.iov_base = NULL; + } + m->front.iov_len = front_len; + + /* middle */ + m->middle = NULL; + + /* data */ + m->nr_pages = calc_pages_for(page_off, page_len); + m->pages = pages; + + dout("ceph_msg_new %p page %d~%d -> %d\n", m, page_off, page_len, + m->nr_pages); + return m; + +out2: + ceph_msg_put(m); +out: + pr_err("msg_new can't create type %d len %d\n", type, front_len); + return ERR_PTR(-ENOMEM); +} + +/* + * Generic message allocator, for incoming messages. 
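+ * Connections that don't need special buffer handling can simply point
+ * their ops->alloc_msg and ops->alloc_middle at ceph_alloc_msg() and
+ * ceph_alloc_middle() (below); that is a convenient default rather than
+ * a requirement.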
+ */ +struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, + struct ceph_msg_header *hdr) +{ + int type = le16_to_cpu(hdr->type); + int front_len = le32_to_cpu(hdr->front_len); + struct ceph_msg *msg = ceph_msg_new(type, front_len, 0, 0, NULL); + + if (!msg) { + pr_err("unable to allocate msg type %d len %d\n", + type, front_len); + return ERR_PTR(-ENOMEM); + } + return msg; +} + +/* + * Allocate "middle" portion of a message, if it is needed and wasn't + * allocated by alloc_msg. This allows us to read a small fixed-size + * per-type header in the front and then gracefully fail (i.e., + * propagate the error to the caller based on info in the front) when + * the middle is too large. + */ +int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) +{ + int type = le16_to_cpu(msg->hdr.type); + int middle_len = le32_to_cpu(msg->hdr.middle_len); + + dout("alloc_middle %p type %d %s middle_len %d\n", msg, type, + ceph_msg_type_name(type), middle_len); + BUG_ON(!middle_len); + BUG_ON(msg->middle); + + msg->middle = ceph_buffer_new_alloc(middle_len, GFP_NOFS); + if (!msg->middle) + return -ENOMEM; + return 0; +} + + +/* + * Free a generically kmalloc'd message. + */ +void ceph_msg_kfree(struct ceph_msg *m) +{ + dout("msg_kfree %p\n", m); + if (m->front_is_vmalloc) + vfree(m->front.iov_base); + else + kfree(m->front.iov_base); + kfree(m); +} + +/* + * Drop a msg ref. Destroy as needed. + */ +void ceph_msg_put(struct ceph_msg *m) +{ + dout("ceph_msg_put %p %d -> %d\n", m, atomic_read(&m->nref), + atomic_read(&m->nref)-1); + if (atomic_read(&m->nref) <= 0) { + pr_err("bad ceph_msg_put on %p %llu %d=%s %d+%d\n", + m, le64_to_cpu(m->hdr.seq), + le16_to_cpu(m->hdr.type), + ceph_msg_type_name(le16_to_cpu(m->hdr.type)), + le32_to_cpu(m->hdr.front_len), + le32_to_cpu(m->hdr.data_len)); + WARN_ON(1); + } + if (atomic_dec_and_test(&m->nref)) { + dout("ceph_msg_put last one on %p\n", m); + WARN_ON(!list_empty(&m->list_head)); + + /* drop middle, data, if any */ + if (m->middle) { + ceph_buffer_put(m->middle); + m->middle = NULL; + } + m->nr_pages = 0; + m->pages = NULL; + + if (m->pool) + ceph_msgpool_put(m->pool, m); + else + ceph_msg_kfree(m); + } +} diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h new file mode 100644 index 000000000000..dcd98b64dca9 --- /dev/null +++ b/fs/ceph/messenger.h @@ -0,0 +1,243 @@ +#ifndef __FS_CEPH_MESSENGER_H +#define __FS_CEPH_MESSENGER_H + +#include +#include +#include +#include +#include +#include + +#include "types.h" +#include "buffer.h" + +struct ceph_msg; +struct ceph_connection; + +extern struct workqueue_struct *ceph_msgr_wq; /* receive work queue */ + +/* + * Ceph defines these callbacks for handling connection events. + */ +struct ceph_connection_operations { + struct ceph_connection *(*get)(struct ceph_connection *); + void (*put)(struct ceph_connection *); + + /* handle an incoming message. */ + void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m); + + /* protocol version mismatch */ + void (*bad_proto) (struct ceph_connection *con); + + /* there was some error on the socket (disconnect, whatever) */ + void (*fault) (struct ceph_connection *con); + + /* a remote host as terminated a message exchange session, and messages + * we sent (or they tried to send us) may be lost. 
*/ + void (*peer_reset) (struct ceph_connection *con); + + struct ceph_msg * (*alloc_msg) (struct ceph_connection *con, + struct ceph_msg_header *hdr); + int (*alloc_middle) (struct ceph_connection *con, + struct ceph_msg *msg); + /* an incoming message has a data payload; tell me what pages I + * should read the data into. */ + int (*prepare_pages) (struct ceph_connection *con, struct ceph_msg *m, + int want); +}; + +extern const char *ceph_name_type_str(int t); + +/* use format string %s%d */ +#define ENTITY_NAME(n) ceph_name_type_str((n).type), le64_to_cpu((n).num) + +struct ceph_messenger { + struct ceph_entity_inst inst; /* my name+address */ + struct page *zero_page; /* used in certain error cases */ + + bool nocrc; + + /* + * the global_seq counts connections i (attempt to) initiate + * in order to disambiguate certain connect race conditions. + */ + u32 global_seq; + spinlock_t global_seq_lock; +}; + +/* + * a single message. it contains a header (src, dest, message type, etc.), + * footer (crc values, mainly), a "front" message body, and possibly a + * data payload (stored in some number of pages). + */ +struct ceph_msg { + struct ceph_msg_header hdr; /* header */ + struct ceph_msg_footer footer; /* footer */ + struct kvec front; /* unaligned blobs of message */ + struct ceph_buffer *middle; + struct page **pages; /* data payload. NOT OWNER. */ + unsigned nr_pages; /* size of page array */ + struct list_head list_head; + atomic_t nref; + bool front_is_vmalloc; + bool more_to_follow; + int front_max; + + struct ceph_msgpool *pool; +}; + +struct ceph_msg_pos { + int page, page_pos; /* which page; offset in page */ + int data_pos; /* offset in data payload */ + int did_page_crc; /* true if we've calculated crc for current page */ +}; + +/* ceph connection fault delay defaults, for exponential backoff */ +#define BASE_DELAY_INTERVAL (HZ/2) +#define MAX_DELAY_INTERVAL (5 * 60 * HZ) + +/* + * ceph_connection state bit flags + * + * QUEUED and BUSY are used together to ensure that only a single + * thread is currently opening, reading or writing data to the socket. + */ +#define LOSSYTX 0 /* we can close channel or drop messages on errors */ +#define LOSSYRX 1 /* peer may reset/drop messages */ +#define CONNECTING 2 +#define KEEPALIVE_PENDING 3 +#define WRITE_PENDING 4 /* we have data ready to send */ +#define QUEUED 5 /* there is work queued on this connection */ +#define BUSY 6 /* work is being done */ +#define STANDBY 8 /* no outgoing messages, socket closed. we keep + * the ceph_connection around to maintain shared + * state with the peer. */ +#define CLOSED 10 /* we've closed the connection */ +#define SOCK_CLOSED 11 /* socket state changed to closed */ +#define REGISTERED 12 /* connection appears in con_tree */ +#define OPENING 13 /* open connection w/ (possibly new) peer */ +#define DEAD 14 /* dead, about to kfree */ + +/* + * A single connection with another host. + * + * We maintain a queue of outgoing messages, and some session state to + * ensure that we can preserve the lossless, ordered delivery of + * messages in the case of a TCP disconnect. 
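+ *
+ * Concretely, outgoing messages sit on out_queue until written, then on
+ * out_sent until the peer acks them; after a fault, anything still on
+ * out_sent is spliced back onto out_queue and resent once we reconnect.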
+ */ +struct ceph_connection { + void *private; + atomic_t nref; + + const struct ceph_connection_operations *ops; + + struct ceph_messenger *msgr; + struct socket *sock; + unsigned long state; /* connection state (see flags above) */ + const char *error_msg; /* error message, if any */ + + struct ceph_entity_addr peer_addr; /* peer address */ + struct ceph_entity_name peer_name; /* peer name */ + struct ceph_entity_addr peer_addr_for_me; + u32 connect_seq; /* identify the most recent connection + attempt for this connection, client */ + u32 peer_global_seq; /* peer's global seq for this connection */ + + /* out queue */ + struct mutex out_mutex; + struct list_head out_queue; + struct list_head out_sent; /* sending or sent but unacked */ + u64 out_seq; /* last message queued for send */ + u64 out_seq_sent; /* last message sent */ + bool out_keepalive_pending; + + u64 in_seq, in_seq_acked; /* last message received, acked */ + + /* connection negotiation temps */ + char in_banner[CEPH_BANNER_MAX_LEN]; + union { + struct { /* outgoing connection */ + struct ceph_msg_connect out_connect; + struct ceph_msg_connect_reply in_reply; + }; + struct { /* incoming */ + struct ceph_msg_connect in_connect; + struct ceph_msg_connect_reply out_reply; + }; + }; + struct ceph_entity_addr actual_peer_addr; + + /* message out temps */ + struct ceph_msg *out_msg; /* sending message (== tail of + out_sent) */ + struct ceph_msg_pos out_msg_pos; + + struct kvec out_kvec[8], /* sending header/footer data */ + *out_kvec_cur; + int out_kvec_left; /* kvec's left in out_kvec */ + int out_skip; /* skip this many bytes */ + int out_kvec_bytes; /* total bytes left */ + bool out_kvec_is_msg; /* kvec refers to out_msg */ + int out_more; /* there is more data after the kvecs */ + __le64 out_temp_ack; /* for writing an ack */ + + /* message in temps */ + struct ceph_msg_header in_hdr; + struct ceph_msg *in_msg; + struct ceph_msg_pos in_msg_pos; + u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */ + + char in_tag; /* protocol control byte */ + int in_base_pos; /* bytes read */ + __le64 in_temp_ack; /* for reading an ack */ + + struct delayed_work work; /* send|recv work */ + unsigned long delay; /* current delay interval */ +}; + + +extern const char *pr_addr(const struct sockaddr_storage *ss); +extern int ceph_parse_ips(const char *c, const char *end, + struct ceph_entity_addr *addr, + int max_count, int *count); + + +extern int ceph_msgr_init(void); +extern void ceph_msgr_exit(void); + +extern struct ceph_messenger *ceph_messenger_create( + struct ceph_entity_addr *myaddr); +extern void ceph_messenger_destroy(struct ceph_messenger *); + +extern void ceph_con_init(struct ceph_messenger *msgr, + struct ceph_connection *con); +extern void ceph_con_shutdown(struct ceph_connection *con); +extern void ceph_con_open(struct ceph_connection *con, + struct ceph_entity_addr *addr); +extern void ceph_con_close(struct ceph_connection *con); +extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg); +extern void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg); +extern void ceph_con_keepalive(struct ceph_connection *con); +extern struct ceph_connection *ceph_con_get(struct ceph_connection *con); +extern void ceph_con_put(struct ceph_connection *con); + +extern struct ceph_msg *ceph_msg_new(int type, int front_len, + int page_len, int page_off, + struct page **pages); +extern void ceph_msg_kfree(struct ceph_msg *m); + +extern struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, 
+ struct ceph_msg_header *hdr); +extern int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg); + + +static inline struct ceph_msg *ceph_msg_get(struct ceph_msg *msg) +{ + dout("ceph_msg_get %p %d -> %d\n", msg, atomic_read(&msg->nref), + atomic_read(&msg->nref)+1); + atomic_inc(&msg->nref); + return msg; +} +extern void ceph_msg_put(struct ceph_msg *msg); + +#endif -- cgit v1.2.3 From 8fc91fd85950d106883852c6d215614ec28cc92d Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:13 -0700 Subject: ceph: message pools The msgpool is a basic mempool_t-like structure to preallocate messages we expect to receive over the wire. This ensures we have the necessary memory preallocated to process replies to requests, or to process unsolicited messages from various servers. Signed-off-by: Sage Weil --- fs/ceph/msgpool.c | 167 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/msgpool.h | 26 +++++++++ 2 files changed, 193 insertions(+) create mode 100644 fs/ceph/msgpool.c create mode 100644 fs/ceph/msgpool.h diff --git a/fs/ceph/msgpool.c b/fs/ceph/msgpool.c new file mode 100644 index 000000000000..39d4d7ed82ce --- /dev/null +++ b/fs/ceph/msgpool.c @@ -0,0 +1,167 @@ +#include "ceph_debug.h" + +#include +#include +#include +#include + +#include "msgpool.h" + +/* + * We use msg pools to preallocate memory for messages we expect to + * receive over the wire, to avoid getting ourselves into OOM + * conditions at unexpected times. We take use a few different + * strategies: + * + * - for request/response type interactions, we preallocate the + * memory needed for the response when we generate the request. + * + * - for messages we can receive at any time from the MDS, we preallocate + * a pool of messages we can re-use. + * + * - for writeback, we preallocate some number of messages to use for + * requests and their replies, so that we always make forward + * progress. + * + * The msgpool behaves like a mempool_t, but keeps preallocated + * ceph_msgs strung together on a list_head instead of using a pointer + * vector. This avoids vector reallocation when we adjust the number + * of preallocated items (which happens frequently). + */ + + +/* + * Allocate or release as necessary to meet our target pool size. 
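+ * Called with pool->lock held; note that the lock is dropped (and
+ * retaken) around the actual allocation.
+ *
+ * For reference, a hypothetical user of the pool API looks roughly like:
+ *
+ *	ceph_msgpool_init(&pool, front_len, min_msgs, true);
+ *	...
+ *	msg = ceph_msgpool_get(&pool);
+ *	... use msg ...
+ *	ceph_msg_put(msg);       msg->pool routes it back into the pool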
+ */ +static int __fill_msgpool(struct ceph_msgpool *pool) +{ + struct ceph_msg *msg; + + while (pool->num < pool->min) { + dout("fill_msgpool %p %d/%d allocating\n", pool, pool->num, + pool->min); + spin_unlock(&pool->lock); + msg = ceph_msg_new(0, pool->front_len, 0, 0, NULL); + spin_lock(&pool->lock); + if (IS_ERR(msg)) + return PTR_ERR(msg); + msg->pool = pool; + list_add(&msg->list_head, &pool->msgs); + pool->num++; + } + while (pool->num > pool->min) { + msg = list_first_entry(&pool->msgs, struct ceph_msg, list_head); + dout("fill_msgpool %p %d/%d releasing %p\n", pool, pool->num, + pool->min, msg); + list_del_init(&msg->list_head); + pool->num--; + ceph_msg_kfree(msg); + } + return 0; +} + +int ceph_msgpool_init(struct ceph_msgpool *pool, + int front_len, int min, bool blocking) +{ + int ret; + + dout("msgpool_init %p front_len %d min %d\n", pool, front_len, min); + spin_lock_init(&pool->lock); + pool->front_len = front_len; + INIT_LIST_HEAD(&pool->msgs); + pool->num = 0; + pool->min = min; + pool->blocking = blocking; + init_waitqueue_head(&pool->wait); + + spin_lock(&pool->lock); + ret = __fill_msgpool(pool); + spin_unlock(&pool->lock); + return ret; +} + +void ceph_msgpool_destroy(struct ceph_msgpool *pool) +{ + dout("msgpool_destroy %p\n", pool); + spin_lock(&pool->lock); + pool->min = 0; + __fill_msgpool(pool); + spin_unlock(&pool->lock); +} + +int ceph_msgpool_resv(struct ceph_msgpool *pool, int delta) +{ + int ret; + + spin_lock(&pool->lock); + dout("msgpool_resv %p delta %d\n", pool, delta); + pool->min += delta; + ret = __fill_msgpool(pool); + spin_unlock(&pool->lock); + return ret; +} + +struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool) +{ + wait_queue_t wait; + struct ceph_msg *msg; + + if (pool->blocking) { + /* mempool_t behavior; first try to alloc */ + msg = ceph_msg_new(0, pool->front_len, 0, 0, NULL); + if (!IS_ERR(msg)) + return msg; + } + + while (1) { + spin_lock(&pool->lock); + if (likely(pool->num)) { + msg = list_entry(pool->msgs.next, struct ceph_msg, + list_head); + list_del_init(&msg->list_head); + pool->num--; + dout("msgpool_get %p got %p, now %d/%d\n", pool, msg, + pool->num, pool->min); + spin_unlock(&pool->lock); + return msg; + } + pr_err("msgpool_get %p now %d/%d, %s\n", pool, pool->num, + pool->min, pool->blocking ? "waiting" : "failing"); + spin_unlock(&pool->lock); + + if (!pool->blocking) { + WARN_ON(1); + + /* maybe we can allocate it now? 
*/ + msg = ceph_msg_new(0, pool->front_len, 0, 0, NULL); + if (!IS_ERR(msg)) + return msg; + + return ERR_PTR(-ENOMEM); + } + + init_wait(&wait); + prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE); + schedule(); + finish_wait(&pool->wait, &wait); + } +} + +void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg) +{ + spin_lock(&pool->lock); + if (pool->num < pool->min) { + ceph_msg_get(msg); /* retake a single ref */ + list_add(&msg->list_head, &pool->msgs); + pool->num++; + dout("msgpool_put %p reclaim %p, now %d/%d\n", pool, msg, + pool->num, pool->min); + spin_unlock(&pool->lock); + wake_up(&pool->wait); + } else { + dout("msgpool_put %p drop %p, at %d/%d\n", pool, msg, + pool->num, pool->min); + spin_unlock(&pool->lock); + ceph_msg_kfree(msg); + } +} diff --git a/fs/ceph/msgpool.h b/fs/ceph/msgpool.h new file mode 100644 index 000000000000..07a2decaa6d8 --- /dev/null +++ b/fs/ceph/msgpool.h @@ -0,0 +1,26 @@ +#ifndef _FS_CEPH_MSGPOOL +#define _FS_CEPH_MSGPOOL + +#include "messenger.h" + +/* + * we use memory pools for preallocating messages we may receive, to + * avoid unexpected OOM conditions. + */ +struct ceph_msgpool { + spinlock_t lock; + int front_len; /* preallocated payload size */ + struct list_head msgs; /* msgs in the pool; each has 1 ref */ + int num, min; /* cur, min # msgs in the pool */ + bool blocking; + wait_queue_head_t wait; +}; + +extern int ceph_msgpool_init(struct ceph_msgpool *pool, + int front_len, int size, bool blocking); +extern void ceph_msgpool_destroy(struct ceph_msgpool *pool); +extern int ceph_msgpool_resv(struct ceph_msgpool *, int delta); +extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *); +extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *); + +#endif -- cgit v1.2.3 From a8e63b7d51cce4557ee7bcd8f51be5cae8547d20 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:13 -0700 Subject: ceph: nfs re-export support Basic NFS re-export support is included. This mostly works. However, Ceph's MDS design precludes the ability to generate a (small) filehandle that will be valid forever, so this is of limited utility. Signed-off-by: Sage Weil --- fs/ceph/export.c | 223 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 223 insertions(+) create mode 100644 fs/ceph/export.c diff --git a/fs/ceph/export.c b/fs/ceph/export.c new file mode 100644 index 000000000000..fc68e39cbad6 --- /dev/null +++ b/fs/ceph/export.c @@ -0,0 +1,223 @@ +#include "ceph_debug.h" + +#include +#include + +#include "super.h" + +/* + * NFS export support + * + * NFS re-export of a ceph mount is, at present, only semireliable. + * The basic issue is that the Ceph architectures doesn't lend itself + * well to generating filehandles that will remain valid forever. + * + * So, we do our best. If you're lucky, your inode will be in the + * client's cache. If it's not, and you have a connectable fh, then + * the MDS server may be able to find it for you. Otherwise, you get + * ESTALE. + * + * There are ways to this more reliable, but in the non-connectable fh + * case, we won't every work perfectly, and in the connectable case, + * some changes are needed on the MDS side to work better. + */ + +/* + * Basic fh + */ +struct ceph_nfs_fh { + u64 ino; +} __attribute__ ((packed)); + +/* + * Larger 'connectable' fh that includes parent ino and name hash. + * Use this whenever possible, as it works more reliably. 
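+ *
+ * ceph_encode_fh() below hands this form back (fh type 2) whenever the
+ * caller's buffer is big enough, and falls back to the basic ino-only
+ * handle (type 1) otherwise.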
+ */ +struct ceph_nfs_confh { + u64 ino, parent_ino; + u32 parent_name_hash; +} __attribute__ ((packed)); + +static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len, + int connectable) +{ + struct ceph_nfs_fh *fh = (void *)rawfh; + struct ceph_nfs_confh *cfh = (void *)rawfh; + struct dentry *parent = dentry->d_parent; + struct inode *inode = dentry->d_inode; + int type; + + /* don't re-export snaps */ + if (ceph_snap(inode) != CEPH_NOSNAP) + return -EINVAL; + + if (*max_len >= sizeof(*cfh)) { + dout("encode_fh %p connectable\n", dentry); + cfh->ino = ceph_ino(dentry->d_inode); + cfh->parent_ino = ceph_ino(parent->d_inode); + cfh->parent_name_hash = parent->d_name.hash; + *max_len = sizeof(*cfh); + type = 2; + } else if (*max_len > sizeof(*fh)) { + if (connectable) + return -ENOSPC; + dout("encode_fh %p\n", dentry); + fh->ino = ceph_ino(dentry->d_inode); + *max_len = sizeof(*fh); + type = 1; + } else { + return -ENOSPC; + } + return type; +} + +/* + * convert regular fh to dentry + * + * FIXME: we should try harder by querying the mds for the ino. + */ +static struct dentry *__fh_to_dentry(struct super_block *sb, + struct ceph_nfs_fh *fh) +{ + struct inode *inode; + struct dentry *dentry; + struct ceph_vino vino; + int err; + + dout("__fh_to_dentry %llx\n", fh->ino); + vino.ino = fh->ino; + vino.snap = CEPH_NOSNAP; + inode = ceph_find_inode(sb, vino); + if (!inode) + return ERR_PTR(-ESTALE); + + dentry = d_obtain_alias(inode); + if (!dentry) { + pr_err("fh_to_dentry %llx -- inode %p but ENOMEM\n", + fh->ino, inode); + iput(inode); + return ERR_PTR(-ENOMEM); + } + err = ceph_init_dentry(dentry); + + if (err < 0) { + iput(inode); + return ERR_PTR(err); + } + dout("__fh_to_dentry %llx %p dentry %p\n", fh->ino, inode, dentry); + return dentry; +} + +/* + * convert connectable fh to dentry + */ +static struct dentry *__cfh_to_dentry(struct super_block *sb, + struct ceph_nfs_confh *cfh) +{ + struct ceph_mds_client *mdsc = &ceph_client(sb)->mdsc; + struct inode *inode; + struct dentry *dentry; + struct ceph_vino vino; + int err; + + dout("__cfh_to_dentry %llx (%llx/%x)\n", + cfh->ino, cfh->parent_ino, cfh->parent_name_hash); + + vino.ino = cfh->ino; + vino.snap = CEPH_NOSNAP; + inode = ceph_find_inode(sb, vino); + if (!inode) { + struct ceph_mds_request *req; + + req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPHASH, + USE_ANY_MDS); + if (IS_ERR(req)) + return ERR_PTR(PTR_ERR(req)); + + req->r_ino1 = vino; + req->r_ino2.ino = cfh->parent_ino; + req->r_ino2.snap = CEPH_NOSNAP; + req->r_path2 = kmalloc(16, GFP_NOFS); + snprintf(req->r_path2, 16, "%d", cfh->parent_name_hash); + req->r_num_caps = 1; + err = ceph_mdsc_do_request(mdsc, NULL, req); + ceph_mdsc_put_request(req); + inode = ceph_find_inode(sb, vino); + if (!inode) + return ERR_PTR(err ? err : -ESTALE); + } + + dentry = d_obtain_alias(inode); + if (!dentry) { + pr_err("cfh_to_dentry %llx -- inode %p but ENOMEM\n", + cfh->ino, inode); + iput(inode); + return ERR_PTR(-ENOMEM); + } + err = ceph_init_dentry(dentry); + if (err < 0) { + iput(inode); + return ERR_PTR(err); + } + dout("__cfh_to_dentry %llx %p dentry %p\n", cfh->ino, inode, dentry); + return dentry; +} + +static struct dentry *ceph_fh_to_dentry(struct super_block *sb, struct fid *fid, + int fh_len, int fh_type) +{ + if (fh_type == 1) + return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw); + else + return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw); +} + +/* + * get parent, if possible. 
+ * + * FIXME: we could do better by querying the mds to discover the + * parent. + */ +static struct dentry *ceph_fh_to_parent(struct super_block *sb, + struct fid *fid, + int fh_len, int fh_type) +{ + struct ceph_nfs_confh *cfh = (void *)fid->raw; + struct ceph_vino vino; + struct inode *inode; + struct dentry *dentry; + int err; + + if (fh_type == 1) + return ERR_PTR(-ESTALE); + + pr_debug("fh_to_parent %llx/%d\n", cfh->parent_ino, + cfh->parent_name_hash); + + vino.ino = cfh->ino; + vino.snap = CEPH_NOSNAP; + inode = ceph_find_inode(sb, vino); + if (!inode) + return ERR_PTR(-ESTALE); + + dentry = d_obtain_alias(inode); + if (!dentry) { + pr_err("fh_to_parent %llx -- inode %p but ENOMEM\n", + cfh->ino, inode); + iput(inode); + return ERR_PTR(-ENOMEM); + } + err = ceph_init_dentry(dentry); + if (err < 0) { + iput(inode); + return ERR_PTR(err); + } + dout("fh_to_parent %llx %p dentry %p\n", cfh->ino, inode, dentry); + return dentry; +} + +const struct export_operations ceph_export_ops = { + .encode_fh = ceph_encode_fh, + .fh_to_dentry = ceph_fh_to_dentry, + .fh_to_parent = ceph_fh_to_parent, +}; -- cgit v1.2.3 From 8f4e91dee2a245e4be6942f4a8d83a769e13a47d Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:14 -0700 Subject: ceph: ioctls A few Ceph ioctls for getting and setting file layout (striping) parameters, and learning the identity and network address of the OSD a given region of a file is stored on. Signed-off-by: Sage Weil --- Documentation/ioctl/ioctl-number.txt | 1 + fs/ceph/ioctl.c | 157 +++++++++++++++++++++++++++++++++++ fs/ceph/ioctl.h | 39 +++++++++ 3 files changed, 197 insertions(+) create mode 100644 fs/ceph/ioctl.c create mode 100644 fs/ceph/ioctl.h diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 947374977ca5..91cfdd76131e 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -182,6 +182,7 @@ Code Seq# Include File Comments 0x90 00 drivers/cdrom/sbpcd.h 0x93 60-7F linux/auto_fs.h +0x97 00-7F fs/ceph/ioctl.h Ceph file system 0x99 00-0F 537-Addinboard driver 0xA0 all linux/sdp/sdp.h Industrial Device Project diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c new file mode 100644 index 000000000000..e4f99eff5d93 --- /dev/null +++ b/fs/ceph/ioctl.c @@ -0,0 +1,157 @@ +#include + +#include "ioctl.h" +#include "super.h" +#include "ceph_debug.h" + + +/* + * ioctls + */ + +/* + * get and set the file layout + */ +static long ceph_ioctl_get_layout(struct file *file, void __user *arg) +{ + struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode); + struct ceph_ioctl_layout l; + int err; + + err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT); + if (!err) { + l.stripe_unit = ceph_file_layout_su(ci->i_layout); + l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout); + l.object_size = ceph_file_layout_object_size(ci->i_layout); + l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool); + if (copy_to_user(arg, &l, sizeof(l))) + return -EFAULT; + } + + return err; +} + +static long ceph_ioctl_set_layout(struct file *file, void __user *arg) +{ + struct inode *inode = file->f_dentry->d_inode; + struct inode *parent_inode = file->f_dentry->d_parent->d_inode; + struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; + struct ceph_mds_request *req; + struct ceph_ioctl_layout l; + int err, i; + + /* copy and validate */ + if (copy_from_user(&l, arg, sizeof(l))) + return -EFAULT; + + if ((l.object_size & ~PAGE_MASK) || + (l.stripe_unit & ~PAGE_MASK) || 
+ !l.stripe_unit || + (l.object_size && + (unsigned)l.object_size % (unsigned)l.stripe_unit)) + return -EINVAL; + + /* make sure it's a valid data pool */ + if (l.data_pool > 0) { + mutex_lock(&mdsc->mutex); + err = -EINVAL; + for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++) + if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) { + err = 0; + break; + } + mutex_unlock(&mdsc->mutex); + if (err) + return err; + } + + req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT, + USE_AUTH_MDS); + if (IS_ERR(req)) + return PTR_ERR(req); + req->r_inode = igrab(inode); + req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL; + + req->r_args.setlayout.layout.fl_stripe_unit = + cpu_to_le32(l.stripe_unit); + req->r_args.setlayout.layout.fl_stripe_count = + cpu_to_le32(l.stripe_count); + req->r_args.setlayout.layout.fl_object_size = + cpu_to_le32(l.object_size); + req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool); + req->r_args.setlayout.layout.fl_pg_preferred = cpu_to_le32((s32)-1); + + err = ceph_mdsc_do_request(mdsc, parent_inode, req); + ceph_mdsc_put_request(req); + return err; +} + +/* + * Return object name, size/offset information, and location (OSD + * number, network address) for a given file offset. + */ +static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg) +{ + struct ceph_ioctl_dataloc dl; + struct inode *inode = file->f_dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_osd_client *osdc = &ceph_client(inode->i_sb)->osdc; + u64 len = 1, olen; + u64 tmp; + struct ceph_object_layout ol; + union ceph_pg pgid; + + /* copy and validate */ + if (copy_from_user(&dl, arg, sizeof(dl))) + return -EFAULT; + + down_read(&osdc->map_sem); + ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, &len, + &dl.object_no, &dl.object_offset, &olen); + dl.file_offset -= dl.object_offset; + dl.object_size = ceph_file_layout_object_size(ci->i_layout); + dl.block_size = ceph_file_layout_su(ci->i_layout); + + /* block_offset = object_offset % block_size */ + tmp = dl.object_offset; + dl.block_offset = do_div(tmp, dl.block_size); + + snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx", + ceph_ino(inode), dl.object_no); + ceph_calc_object_layout(&ol, dl.object_name, &ci->i_layout, + osdc->osdmap); + + pgid.pg64 = le64_to_cpu(ol.ol_pgid); + dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid); + if (dl.osd >= 0) { + struct ceph_entity_addr *a = + ceph_osd_addr(osdc->osdmap, dl.osd); + if (a) + memcpy(&dl.osd_addr, &a->in_addr, sizeof(dl.osd_addr)); + } else { + memset(&dl.osd_addr, 0, sizeof(dl.osd_addr)); + } + up_read(&osdc->map_sem); + + /* send result back to user */ + if (copy_to_user(arg, &dl, sizeof(dl))) + return -EFAULT; + + return 0; +} + +long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + dout("ioctl file %p cmd %u arg %lu\n", file, cmd, arg); + switch (cmd) { + case CEPH_IOC_GET_LAYOUT: + return ceph_ioctl_get_layout(file, (void __user *)arg); + + case CEPH_IOC_SET_LAYOUT: + return ceph_ioctl_set_layout(file, (void __user *)arg); + + case CEPH_IOC_GET_DATALOC: + return ceph_ioctl_get_dataloc(file, (void __user *)arg); + } + return -ENOTTY; +} diff --git a/fs/ceph/ioctl.h b/fs/ceph/ioctl.h new file mode 100644 index 000000000000..3c511dab3730 --- /dev/null +++ b/fs/ceph/ioctl.h @@ -0,0 +1,39 @@ +#ifndef FS_CEPH_IOCTL_H +#define FS_CEPH_IOCTL_H + +#include +#include + +#define CEPH_IOCTL_MAGIC 0x97 + +/* just use u64 to align sanely on all archs */ +struct ceph_ioctl_layout { + __u64 
stripe_unit, stripe_count, object_size; + __u64 data_pool; +}; + +#define CEPH_IOC_GET_LAYOUT _IOR(CEPH_IOCTL_MAGIC, 1, \ + struct ceph_ioctl_layout) +#define CEPH_IOC_SET_LAYOUT _IOW(CEPH_IOCTL_MAGIC, 2, \ + struct ceph_ioctl_layout) + +/* + * Extract identity, address of the OSD and object storing a given + * file offset. + */ +struct ceph_ioctl_dataloc { + __u64 file_offset; /* in+out: file offset */ + __u64 object_offset; /* out: offset in object */ + __u64 object_no; /* out: object # */ + __u64 object_size; /* out: object size */ + char object_name[64]; /* out: object name */ + __u64 block_offset; /* out: offset in block */ + __u64 block_size; /* out: block length */ + __s64 osd; /* out: osd # */ + struct sockaddr_storage osd_addr; /* out: osd address */ +}; + +#define CEPH_IOC_GET_DATALOC _IOWR(CEPH_IOCTL_MAGIC, 3, \ + struct ceph_ioctl_dataloc) + +#endif -- cgit v1.2.3 From 76aa844d5b2fb8c839180d3f5874e333b297e5fd Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:14 -0700 Subject: ceph: debugfs Basic state information is available via /sys/kernel/debug/ceph, including instances of the client, fsids, current monitor, mds and osd maps, outstanding server requests, and hooks to adjust debug levels. Signed-off-by: Sage Weil --- fs/ceph/debugfs.c | 425 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 425 insertions(+) create mode 100644 fs/ceph/debugfs.c diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c new file mode 100644 index 000000000000..9edbad32f116 --- /dev/null +++ b/fs/ceph/debugfs.c @@ -0,0 +1,425 @@ +#include "ceph_debug.h" + +#include +#include +#include +#include + +#include "super.h" +#include "mds_client.h" + +/* + * Implement /sys/kernel/debug/ceph fun + * + * /sys/kernel/debug/ceph/client* - an instance of the ceph client + * .../osdmap - current osdmap + * .../mdsmap - current mdsmap + * .../monmap - current monmap + * .../osdc - active osd requests + * .../mdsc - active mds requests + * .../monc - mon client state + * .../dentry_lru - dump contents of dentry lru + * .../caps - expose cap (reservation) stats + */ + +static struct dentry *ceph_debugfs_dir; + +static int monmap_show(struct seq_file *s, void *p) +{ + int i; + struct ceph_client *client = s->private; + + if (client->monc.monmap == NULL) + return 0; + + seq_printf(s, "epoch %d\n", client->monc.monmap->epoch); + for (i = 0; i < client->monc.monmap->num_mon; i++) { + struct ceph_entity_inst *inst = + &client->monc.monmap->mon_inst[i]; + + seq_printf(s, "\t%s%lld\t%s\n", + ENTITY_NAME(inst->name), + pr_addr(&inst->addr.in_addr)); + } + return 0; +} + +static int mdsmap_show(struct seq_file *s, void *p) +{ + int i; + struct ceph_client *client = s->private; + + if (client->mdsc.mdsmap == NULL) + return 0; + seq_printf(s, "epoch %d\n", client->mdsc.mdsmap->m_epoch); + seq_printf(s, "root %d\n", client->mdsc.mdsmap->m_root); + seq_printf(s, "session_timeout %d\n", + client->mdsc.mdsmap->m_session_timeout); + seq_printf(s, "session_autoclose %d\n", + client->mdsc.mdsmap->m_session_autoclose); + for (i = 0; i < client->mdsc.mdsmap->m_max_mds; i++) { + struct ceph_entity_addr *addr = + &client->mdsc.mdsmap->m_info[i].addr; + int state = client->mdsc.mdsmap->m_info[i].state; + + seq_printf(s, "\tmds%d\t%s\t(%s)\n", i, pr_addr(&addr->in_addr), + ceph_mds_state_name(state)); + } + return 0; +} + +static int osdmap_show(struct seq_file *s, void *p) +{ + int i; + struct ceph_client *client = s->private; + + if (client->osdc.osdmap == NULL) + return 0; + seq_printf(s, "epoch 
%d\n", client->osdc.osdmap->epoch); + seq_printf(s, "flags%s%s\n", + (client->osdc.osdmap->flags & CEPH_OSDMAP_NEARFULL) ? + " NEARFULL" : "", + (client->osdc.osdmap->flags & CEPH_OSDMAP_FULL) ? + " FULL" : ""); + for (i = 0; i < client->osdc.osdmap->num_pools; i++) { + struct ceph_pg_pool_info *pool = + &client->osdc.osdmap->pg_pool[i]; + seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n", + i, pool->v.pg_num, pool->pg_num_mask, + pool->v.lpg_num, pool->lpg_num_mask); + } + for (i = 0; i < client->osdc.osdmap->max_osd; i++) { + struct ceph_entity_addr *addr = + &client->osdc.osdmap->osd_addr[i]; + int state = client->osdc.osdmap->osd_state[i]; + char sb[64]; + + seq_printf(s, "\tosd%d\t%s\t%3d%%\t(%s)\n", + i, pr_addr(&addr->in_addr), + ((client->osdc.osdmap->osd_weight[i]*100) >> 16), + ceph_osdmap_state_str(sb, sizeof(sb), state)); + } + return 0; +} + +static int monc_show(struct seq_file *s, void *p) +{ + struct ceph_client *client = s->private; + struct ceph_mon_statfs_request *req; + u64 nexttid = 0; + int got; + struct ceph_mon_client *monc = &client->monc; + + mutex_lock(&monc->mutex); + + if (monc->have_mdsmap) + seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap); + if (monc->have_osdmap) + seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap); + if (monc->want_next_osdmap) + seq_printf(s, "want next osdmap\n"); + + while (nexttid < monc->last_tid) { + got = radix_tree_gang_lookup(&monc->statfs_request_tree, + (void **)&req, nexttid, 1); + if (got == 0) + break; + nexttid = req->tid + 1; + + seq_printf(s, "%lld statfs\n", req->tid); + } + mutex_unlock(&monc->mutex); + + return 0; +} + +static int mdsc_show(struct seq_file *s, void *p) +{ + struct ceph_client *client = s->private; + struct ceph_mds_request *req; + u64 nexttid = 0; + int got; + struct ceph_mds_client *mdsc = &client->mdsc; + int pathlen; + u64 pathbase; + char *path; + + mutex_lock(&mdsc->mutex); + while (nexttid < mdsc->last_tid) { + got = radix_tree_gang_lookup(&mdsc->request_tree, + (void **)&req, nexttid, 1); + if (got == 0) + break; + nexttid = req->r_tid + 1; + + if (req->r_request) + seq_printf(s, "%lld\tmds%d\t", req->r_tid, req->r_mds); + else + seq_printf(s, "%lld\t(no request)\t", req->r_tid); + + seq_printf(s, "%s", ceph_mds_op_name(req->r_op)); + + if (req->r_got_unsafe) + seq_printf(s, "\t(unsafe)"); + else + seq_printf(s, "\t"); + + if (req->r_inode) { + seq_printf(s, " #%llx", ceph_ino(req->r_inode)); + } else if (req->r_dentry) { + path = ceph_mdsc_build_path(req->r_dentry, &pathlen, + &pathbase, 0); + spin_lock(&req->r_dentry->d_lock); + seq_printf(s, " #%llx/%.*s (%s)", + ceph_ino(req->r_dentry->d_parent->d_inode), + req->r_dentry->d_name.len, + req->r_dentry->d_name.name, + path ? path : ""); + spin_unlock(&req->r_dentry->d_lock); + kfree(path); + } else if (req->r_path1) { + seq_printf(s, " #%llx/%s", req->r_ino1.ino, + req->r_path1); + } + + if (req->r_old_dentry) { + path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen, + &pathbase, 0); + spin_lock(&req->r_old_dentry->d_lock); + seq_printf(s, " #%llx/%.*s (%s)", + ceph_ino(req->r_old_dentry->d_parent->d_inode), + req->r_old_dentry->d_name.len, + req->r_old_dentry->d_name.name, + path ? 
path : ""); + spin_unlock(&req->r_old_dentry->d_lock); + kfree(path); + } else if (req->r_path2) { + if (req->r_ino2.ino) + seq_printf(s, " #%llx/%s", req->r_ino2.ino, + req->r_path2); + else + seq_printf(s, " %s", req->r_path2); + } + + seq_printf(s, "\n"); + } + mutex_unlock(&mdsc->mutex); + + return 0; +} + +static int osdc_show(struct seq_file *s, void *pp) +{ + struct ceph_client *client = s->private; + struct ceph_osd_client *osdc = &client->osdc; + struct rb_node *p; + + mutex_lock(&osdc->request_mutex); + for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { + struct ceph_osd_request *req; + struct ceph_osd_request_head *head; + struct ceph_osd_op *op; + int num_ops; + int opcode, olen; + int i; + + req = rb_entry(p, struct ceph_osd_request, r_node); + + seq_printf(s, "%lld\tosd%d\t", req->r_tid, + req->r_osd ? req->r_osd->o_osd : -1); + + head = req->r_request->front.iov_base; + op = (void *)(head + 1); + + num_ops = le16_to_cpu(head->num_ops); + olen = le32_to_cpu(head->object_len); + seq_printf(s, "%.*s", olen, + (const char *)(head->ops + num_ops)); + + if (req->r_reassert_version.epoch) + seq_printf(s, "\t%u'%llu", + (unsigned)le32_to_cpu(req->r_reassert_version.epoch), + le64_to_cpu(req->r_reassert_version.version)); + else + seq_printf(s, "\t"); + + for (i = 0; i < num_ops; i++) { + opcode = le16_to_cpu(op->op); + seq_printf(s, "\t%s", ceph_osd_op_name(opcode)); + op++; + } + + seq_printf(s, "\n"); + } + mutex_unlock(&osdc->request_mutex); + return 0; +} + +static int caps_show(struct seq_file *s, void *p) +{ + struct ceph_client *client = p; + int total, avail, used, reserved; + + ceph_reservation_status(client, &total, &avail, &used, &reserved); + seq_printf(s, "total\t\t%d\n" + "avail\t\t%d\n" + "used\t\t%d\n" + "reserved\t%d\n", + total, avail, used, reserved); + return 0; +} + +static int dentry_lru_show(struct seq_file *s, void *ptr) +{ + struct ceph_client *client = s->private; + struct ceph_mds_client *mdsc = &client->mdsc; + struct ceph_dentry_info *di; + + spin_lock(&mdsc->dentry_lru_lock); + list_for_each_entry(di, &mdsc->dentry_lru, lru) { + struct dentry *dentry = di->dentry; + seq_printf(s, "%p %p\t%.*s\n", + di, dentry, dentry->d_name.len, dentry->d_name.name); + } + spin_unlock(&mdsc->dentry_lru_lock); + + return 0; +} + +#define DEFINE_SHOW_FUNC(name) \ +static int name##_open(struct inode *inode, struct file *file) \ +{ \ + struct seq_file *sf; \ + int ret; \ + \ + ret = single_open(file, name, NULL); \ + sf = file->private_data; \ + sf->private = inode->i_private; \ + return ret; \ +} \ + \ +static const struct file_operations name##_fops = { \ + .open = name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +}; + +DEFINE_SHOW_FUNC(monmap_show) +DEFINE_SHOW_FUNC(mdsmap_show) +DEFINE_SHOW_FUNC(osdmap_show) +DEFINE_SHOW_FUNC(monc_show) +DEFINE_SHOW_FUNC(mdsc_show) +DEFINE_SHOW_FUNC(osdc_show) +DEFINE_SHOW_FUNC(dentry_lru_show) +DEFINE_SHOW_FUNC(caps_show) + +int __init ceph_debugfs_init(void) +{ + ceph_debugfs_dir = debugfs_create_dir("ceph", NULL); + if (!ceph_debugfs_dir) + return -ENOMEM; + return 0; +} + +void ceph_debugfs_cleanup(void) +{ + debugfs_remove(ceph_debugfs_dir); +} + +int ceph_debugfs_client_init(struct ceph_client *client) +{ + int ret = 0; + char name[80]; + + snprintf(name, sizeof(name), FSID_FORMAT ".client%lld", + PR_FSID(&client->monc.monmap->fsid), client->whoami); + + client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir); + if (!client->debugfs_dir) + goto out; + + 
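+ /* one debugfs file per entry described in the comment at the top of this file */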
client->monc.debugfs_file = debugfs_create_file("monc", + 0600, + client->debugfs_dir, + client, + &monc_show_fops); + if (!client->monc.debugfs_file) + goto out; + + client->mdsc.debugfs_file = debugfs_create_file("mdsc", + 0600, + client->debugfs_dir, + client, + &mdsc_show_fops); + if (!client->mdsc.debugfs_file) + goto out; + + client->osdc.debugfs_file = debugfs_create_file("osdc", + 0600, + client->debugfs_dir, + client, + &osdc_show_fops); + if (!client->osdc.debugfs_file) + goto out; + + client->debugfs_monmap = debugfs_create_file("monmap", + 0600, + client->debugfs_dir, + client, + &monmap_show_fops); + if (!client->debugfs_monmap) + goto out; + + client->debugfs_mdsmap = debugfs_create_file("mdsmap", + 0600, + client->debugfs_dir, + client, + &mdsmap_show_fops); + if (!client->debugfs_mdsmap) + goto out; + + client->debugfs_osdmap = debugfs_create_file("osdmap", + 0600, + client->debugfs_dir, + client, + &osdmap_show_fops); + if (!client->debugfs_osdmap) + goto out; + + client->debugfs_dentry_lru = debugfs_create_file("dentry_lru", + 0600, + client->debugfs_dir, + client, + &dentry_lru_show_fops); + if (!client->debugfs_dentry_lru) + goto out; + + client->debugfs_caps = debugfs_create_file("caps", + 0400, + client->debugfs_dir, + client, + &caps_show_fops); + if (!client->debugfs_caps) + goto out; + + return 0; + +out: + ceph_debugfs_client_cleanup(client); + return ret; +} + +void ceph_debugfs_client_cleanup(struct ceph_client *client) +{ + debugfs_remove(client->debugfs_caps); + debugfs_remove(client->debugfs_dentry_lru); + debugfs_remove(client->debugfs_osdmap); + debugfs_remove(client->debugfs_mdsmap); + debugfs_remove(client->debugfs_monmap); + debugfs_remove(client->osdc.debugfs_file); + debugfs_remove(client->mdsc.debugfs_file); + debugfs_remove(client->monc.debugfs_file); + debugfs_remove(client->debugfs_dir); +} + -- cgit v1.2.3 From 9030aaf9bf0a1eee47a154c316c789e959638b0f Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 11:31:15 -0700 Subject: ceph: Kconfig, Makefile Kconfig options and Makefile. 
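
The Makefile also supports building the client out of tree against an
installed kernel; per its 'all' target below, that amounts to roughly

    make -C /lib/modules/$(uname -r)/build M=$PWD CONFIG_CEPH_FS=m modules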
Signed-off-by: Sage Weil --- MAINTAINERS | 9 +++++++++ fs/Kconfig | 1 + fs/Makefile | 1 + fs/ceph/Kconfig | 26 ++++++++++++++++++++++++++ fs/ceph/Makefile | 36 ++++++++++++++++++++++++++++++++++++ 5 files changed, 73 insertions(+) create mode 100644 fs/ceph/Kconfig create mode 100644 fs/ceph/Makefile diff --git a/MAINTAINERS b/MAINTAINERS index c450f3abb8c9..9b680ff8c5c0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1294,6 +1294,15 @@ F: arch/powerpc/include/asm/spu*.h F: arch/powerpc/oprofile/*cell* F: arch/powerpc/platforms/cell/ +CEPH DISTRIBUTED FILE SYSTEM CLIENT +M: Sage Weil +L: ceph-devel@lists.sourceforge.net +W: http://ceph.newdream.net/ +T: git git://ceph.newdream.net/linux-ceph-client.git +S: Supported +F: Documentation/filesystems/ceph.txt +F: fs/ceph + CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM: M: David Vrabel L: linux-usb@vger.kernel.org diff --git a/fs/Kconfig b/fs/Kconfig index d4bf8caad8d0..ca687092a334 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -234,6 +234,7 @@ config NFS_COMMON source "net/sunrpc/Kconfig" source "fs/smbfs/Kconfig" +source "fs/ceph/Kconfig" source "fs/cifs/Kconfig" source "fs/ncpfs/Kconfig" source "fs/coda/Kconfig" diff --git a/fs/Makefile b/fs/Makefile index af6d04700d9c..5ef73a0a9f25 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -124,3 +124,4 @@ obj-$(CONFIG_OCFS2_FS) += ocfs2/ obj-$(CONFIG_BTRFS_FS) += btrfs/ obj-$(CONFIG_GFS2_FS) += gfs2/ obj-$(CONFIG_EXOFS_FS) += exofs/ +obj-$(CONFIG_CEPH_FS) += ceph/ diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig new file mode 100644 index 000000000000..bc1fbd956187 --- /dev/null +++ b/fs/ceph/Kconfig @@ -0,0 +1,26 @@ +config CEPH_FS + tristate "Ceph distributed file system (EXPERIMENTAL)" + depends on INET && EXPERIMENTAL + select LIBCRC32C + help + Choose Y or M here to include support for mounting the + experimental Ceph distributed file system. Ceph is an extremely + scalable file system designed to provide high performance, + reliable access to petabytes of storage. + + More information at http://ceph.newdream.net/. + + If unsure, say N. + +config CEPH_FS_PRETTYDEBUG + bool "Include file:line in ceph debug output" + depends on CEPH_FS + default n + help + If you say Y here, debug output will include a filename and + line to aid debugging. This icnreases kernel size and slows + execution slightly when debug call sites are enabled (e.g., + via CONFIG_DYNAMIC_DEBUG). + + If unsure, say N. + diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile new file mode 100644 index 000000000000..7da6d69dba29 --- /dev/null +++ b/fs/ceph/Makefile @@ -0,0 +1,36 @@ +# +# Makefile for CEPH filesystem. +# + +ifneq ($(KERNELRELEASE),) + +obj-$(CONFIG_CEPH_FS) += ceph.o + +ceph-objs := super.o inode.o dir.o file.o addr.o ioctl.o \ + export.o caps.o snap.o xattr.o \ + messenger.o msgpool.o buffer.o \ + mds_client.o mdsmap.o \ + mon_client.o \ + osd_client.o osdmap.o crush/crush.o crush/mapper.o \ + debugfs.o \ + ceph_fs.o ceph_strings.o ceph_frag.o + +else +#Otherwise we were called directly from the command +# line; invoke the kernel build system. 
+ +KERNELDIR ?= /lib/modules/$(shell uname -r)/build +PWD := $(shell pwd) + +default: all + +all: + $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules + +modules_install: + $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules_install + +clean: + $(MAKE) -C $(KERNELDIR) M=$(PWD) clean + +endif -- cgit v1.2.3 From e324b8f991679a43e09dd13500bf1988c0bfc0ea Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 6 Oct 2009 12:20:56 -0700 Subject: ceph: document shared files in README Document files shared between kernel and user code trees. Signed-off-by: Sage Weil --- fs/ceph/README | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 fs/ceph/README diff --git a/fs/ceph/README b/fs/ceph/README new file mode 100644 index 000000000000..231a1df5eafe --- /dev/null +++ b/fs/ceph/README @@ -0,0 +1,17 @@ +# +# The following files are shared by (and manually synchronized +# between) the Ceph userland and kernel client. +# +# userland kernel +src/include/ceph_fs.h fs/ceph/ceph_fs.h +src/include/ceph_fs.cc fs/ceph/ceph_fs.c +src/include/msgr.h fs/ceph/msgr.h +src/include/rados.h fs/ceph/rados.h +src/include/ceph_strings.cc fs/ceph/ceph_strings.c +src/include/ceph_frag.h fs/ceph/ceph_frag.h +src/include/ceph_frag.cc fs/ceph/ceph_frag.c +src/crush/crush.c fs/ceph/crush/crush.c +src/crush/crush.h fs/ceph/crush/crush.h +src/crush/mapper.c fs/ceph/crush/mapper.c +src/crush/mapper.h fs/ceph/crush/mapper.h +src/crush/hash.h fs/ceph/crush/hash.h -- cgit v1.2.3 From fa0b72e9e2900ee87886aaf8bc4c4701be1e081d Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 7 Oct 2009 10:59:10 -0700 Subject: ceph: show meaningful version on module load Kill the old git revision; print the ceph version and protocol versions instead. Signed-off-by: Sage Weil --- fs/ceph/ceph_ver.h | 6 ------ fs/ceph/super.c | 5 +++-- 2 files changed, 3 insertions(+), 8 deletions(-) delete mode 100644 fs/ceph/ceph_ver.h diff --git a/fs/ceph/ceph_ver.h b/fs/ceph/ceph_ver.h deleted file mode 100644 index 66c3727b671b..000000000000 --- a/fs/ceph/ceph_ver.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __CEPH_VERSION_H -#define __CEPH_VERSION_H - -#define CEPH_GIT_VER 335cd8f952b457095ea2a66aee3db50efb63c91d - -#endif diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 0723fb6e96a1..b3404a319c22 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -16,7 +16,6 @@ #include #include -#include "ceph_ver.h" #include "decode.h" #include "super.h" #include "mon_client.h" @@ -903,7 +902,9 @@ static int __init init_ceph(void) if (ret) goto out_icache; - pr_info("loaded (%s)\n", STRINGIFY(CEPH_GIT_VER)); + pr_info("loaded %d.%d.%d (mon/mds/osd proto %d/%d/%d)\n", + CEPH_VERSION_MAJOR, CEPH_VERSION_MINOR, CEPH_VERSION_PATCH, + CEPH_MONC_PROTOCOL, CEPH_MDSC_PROTOCOL, CEPH_OSDC_PROTOCOL); return 0; out_icache: -- cgit v1.2.3 From b195befd9acb514dd2afb722e63fdd880ed63217 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 7 Oct 2009 10:59:30 -0700 Subject: ceph: include preferred_osd in file layout virtual xattr Signed-off-by: Sage Weil --- fs/ceph/xattr.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 8eaac04d1b87..65b3a84bbb2e 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -91,11 +91,18 @@ static struct ceph_vxattr_cb ceph_dir_vxattrs[] = { static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val, size_t size) { - return snprintf(val, size, + int ret; + + ret = snprintf(val, size, "chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n", (unsigned 
long long)ceph_file_layout_su(ci->i_layout), (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout), (unsigned long long)ceph_file_layout_object_size(ci->i_layout)); + if (ceph_file_layout_pg_preferred(ci->i_layout)) + ret += snprintf(val + ret, size, "preferred_osd=%lld\n", + (unsigned long long)ceph_file_layout_pg_preferred( + ci->i_layout)); + return ret; } static struct ceph_vxattr_cb ceph_file_vxattrs[] = { -- cgit v1.2.3 From b28813a61d6ffe05ad353a86965607bb7a7fd60f Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 7 Oct 2009 10:59:34 -0700 Subject: ceph: gracefully avoid empty crush buckets This avoids a divide by zero when the input and/or map are malformed. Signed-off-by: Sage Weil --- fs/ceph/crush/mapper.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/fs/ceph/crush/mapper.c b/fs/ceph/crush/mapper.c index 0f0730c62695..c268393adfcb 100644 --- a/fs/ceph/crush/mapper.c +++ b/fs/ceph/crush/mapper.c @@ -299,7 +299,7 @@ static int crush_choose(struct crush_map *map, struct crush_bucket *in = bucket; int r; int i; - int item; + int item = 0; int itemtype; int collide, reject; const int orig_tries = 5; /* attempts before we fall back to search */ @@ -316,6 +316,7 @@ static int crush_choose(struct crush_map *map, /* choose through intervening buckets */ flocal = 0; do { + collide = 0; retry_bucket = 0; r = rep; if (in->alg == CRUSH_BUCKET_UNIFORM) { @@ -340,6 +341,10 @@ static int crush_choose(struct crush_map *map, } /* bucket choose */ + if (in->size == 0) { + reject = 1; + goto reject; + } if (flocal >= (in->size>>1) && flocal > orig_tries) item = bucket_perm_choose(in, x, r); @@ -363,7 +368,6 @@ static int crush_choose(struct crush_map *map, } /* collision? */ - collide = 0; for (i = 0; i < outpos; i++) { if (out[i] == item) { collide = 1; @@ -388,6 +392,7 @@ static int crush_choose(struct crush_map *map, reject = 0; } +reject: if (reject || collide) { ftotal++; flocal++; -- cgit v1.2.3 From e251e288082d5e89604eee1fef0c31bed1fe8f02 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 7 Oct 2009 16:38:19 -0700 Subject: ceph: fix mdsmap decoding when multiple mds's are present A misplaced sizeof() around namelen was throwing things off. 
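
For example, after decoding a second string length the old code did

	*p += sizeof(namelen);

skipping a constant four bytes instead of the namelen bytes the string
actually occupies, so with more than one mds every following entry was
decoded from the wrong offset.  The export target array suffered from a
similar misplaced sizeof().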
Signed-off-by: Sage Weil --- fs/ceph/mdsmap.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c index 15913cbeb289..09180d8fafe4 100644 --- a/fs/ceph/mdsmap.c +++ b/fs/ceph/mdsmap.c @@ -85,28 +85,28 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) void *pexport_targets = NULL; ceph_decode_need(p, end, sizeof(addr) + 1 + sizeof(u32), bad); - *p += sizeof(addr); /* skip addr key */ + ceph_decode_copy(p, &addr, sizeof(addr)); ceph_decode_8(p, infoversion); ceph_decode_32(p, namelen); /* skip mds name */ *p += namelen; ceph_decode_need(p, end, - 5*sizeof(u32) + sizeof(u64) + + 4*sizeof(u32) + sizeof(u64) + sizeof(addr) + sizeof(struct ceph_timespec), bad); ceph_decode_32(p, mds); ceph_decode_32(p, inc); ceph_decode_32(p, state); ceph_decode_64(p, state_seq); - ceph_decode_copy(p, &addr, sizeof(addr)); + *p += sizeof(addr); *p += sizeof(struct ceph_timespec); *p += sizeof(u32); ceph_decode_32_safe(p, end, namelen, bad); - *p += sizeof(namelen); + *p += namelen; if (infoversion >= 2) { ceph_decode_32_safe(p, end, num_export_targets, bad); pexport_targets = *p; - *p += sizeof(num_export_targets * sizeof(u32)); + *p += num_export_targets * sizeof(u32); } else { num_export_targets = 0; } -- cgit v1.2.3 From 0656d11ba6ffa3dee0e8916a1903f96185651217 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 8 Oct 2009 10:25:46 -0700 Subject: ceph: renew mon subscription before it expires Be conservative: renew subscription once half the interval has expired. Do not reuse sub expiration to control hunting. Signed-off-by: Sage Weil --- fs/ceph/mon_client.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index b0c95cec5df8..9c34df17fa4b 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -213,7 +213,7 @@ static void handle_subscribe_ack(struct ceph_mon_client *monc, monc->hunting = false; } dout("handle_subscribe_ack after %d seconds\n", seconds); - monc->sub_renew_after = monc->sub_sent + seconds*HZ - 1; + monc->sub_renew_after = monc->sub_sent + (seconds >> 1)*HZ - 1; monc->sub_sent = 0; mutex_unlock(&monc->mutex); return; @@ -512,7 +512,7 @@ static void delayed_work(struct work_struct *work) if (monc->want_mount) { __request_mount(monc); } else { - if (__sub_expired(monc)) { + if (monc->hunting) { __close_session(monc); __open_session(monc); /* continue hunting */ } else { -- cgit v1.2.3 From c1ea8823be69ebebaface912142190e910711984 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 8 Oct 2009 16:55:47 -0700 Subject: ceph: fix osd request submission race The osd request submission path registers the request, drops and retakes the request_mutex, then sends it to the OSD. A racing kick_requests could sent it during that interval, causing the same msg to be sent twice and BUGing in the msgr. Fix by only sending the message if it hasn't been touched by other threads. 
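
With the fix, after request_mutex is retaken the submission path only
sends if nothing else has touched the request yet, roughly:

	if (req->r_sent == 0) {
		rc = __send_request(osdc, req);
		...
	}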
Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 978593a4f466..d14019dd6868 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -837,7 +837,8 @@ static void kick_requests(struct ceph_osd_client *osdc, } kick: - dout("kicking tid %llu osd%d\n", req->r_tid, req->r_osd->o_osd); + dout("kicking %p tid %llu osd%d\n", req, req->r_tid, + req->r_osd->o_osd); req->r_flags |= CEPH_OSD_FLAG_RETRY; err = __send_request(osdc, req); if (err) { @@ -1016,7 +1017,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req, bool nofail) { - int rc; + int rc = 0; req->r_request->pages = req->r_pages; req->r_request->nr_pages = req->r_num_pages; @@ -1025,15 +1026,22 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, down_read(&osdc->map_sem); mutex_lock(&osdc->request_mutex); - rc = __send_request(osdc, req); - if (rc) { - if (nofail) { - dout("osdc_start_request failed send, marking %lld\n", - req->r_tid); - req->r_resend = true; - rc = 0; - } else { - __unregister_request(osdc, req); + /* + * a racing kick_requests() may have sent the message for us + * while we dropped request_mutex above, so only send now if + * the request still han't been touched yet. + */ + if (req->r_sent == 0) { + rc = __send_request(osdc, req); + if (rc) { + if (nofail) { + dout("osdc_start_request failed send, " + " marking %lld\n", req->r_tid); + req->r_resend = true; + rc = 0; + } else { + __unregister_request(osdc, req); + } } } mutex_unlock(&osdc->request_mutex); -- cgit v1.2.3 From 0ba6478df7c6bef0f4b2625554545f941f89fb97 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 8 Oct 2009 16:57:16 -0700 Subject: ceph: revoke osd request message on request completion If an osd has failed or returned and a request has been sent twice, it's possible to get a reply and unregister the request while the request message is queued for delivery. Since the message references the caller's page vector, we need to revoke it before completing. Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index d14019dd6868..0aea8afaa072 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -469,10 +469,15 @@ static void __unregister_request(struct ceph_osd_client *osdc, rb_erase(&req->r_node, &osdc->requests); osdc->num_requests--; - list_del_init(&req->r_osd_item); - if (list_empty(&req->r_osd->o_requests)) - remove_osd(osdc, req->r_osd); - req->r_osd = NULL; + if (req->r_osd) { + /* make sure the original request isn't in flight. */ + ceph_con_revoke(&req->r_osd->o_con, req->r_request); + + list_del_init(&req->r_osd_item); + if (list_empty(&req->r_osd->o_requests)) + remove_osd(osdc, req->r_osd); + req->r_osd = NULL; + } ceph_osdc_put_request(req); -- cgit v1.2.3 From 991abb6ecfc8edf9863aa6a3f43249e63f9d4d4e Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 8 Oct 2009 22:22:37 -0700 Subject: ceph: fail gracefully on corrupt osdmap (bad pg_temp mapping) Return an error and report a corrupt map instead of crying BUG(). 
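
Concretely, __insert_pg_mapping() now returns -EEXIST when it finds a
duplicate pgid instead of calling BUG(), and both decode paths check it:

	err = __insert_pg_mapping(pg, &map->pg_temp);
	if (err)
		goto bad;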
Signed-off-by: Sage Weil --- fs/ceph/osdmap.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index e38fe6309b1c..342e5f80996b 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -366,8 +366,8 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) /* * Insert a new pg_temp mapping */ -static void __insert_pg_mapping(struct ceph_pg_mapping *new, - struct rb_root *root) +static int __insert_pg_mapping(struct ceph_pg_mapping *new, + struct rb_root *root) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; @@ -381,11 +381,12 @@ static void __insert_pg_mapping(struct ceph_pg_mapping *new, else if (new->pgid > pg->pgid) p = &(*p)->rb_right; else - BUG(); + return -EEXIST; } rb_link_node(&new->node, parent, p); rb_insert_color(&new->node, root); + return 0; } /* @@ -481,7 +482,9 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) for (j = 0; j < n; j++) ceph_decode_32(p, pg->osds[j]); - __insert_pg_mapping(pg, &map->pg_temp); + err = __insert_pg_mapping(pg, &map->pg_temp); + if (err) + goto bad; dout(" added pg_temp %llx len %d\n", pgid, len); } @@ -681,7 +684,9 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, pg->len = pglen; for (j = 0; j < len; j++) ceph_decode_32(p, pg->osds[j]); - __insert_pg_mapping(pg, &map->pg_temp); + err = __insert_pg_mapping(pg, &map->pg_temp); + if (err) + goto bad; dout(" added pg_temp %llx len %d\n", pgid, pglen); } } -- cgit v1.2.3 From 81b024e70fed635a2cf5a4bf911db1649bb005f5 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 9 Oct 2009 10:29:18 -0700 Subject: ceph: reset osd session on fault, not peer_reset The peer_reset just takes longer (until we reconnect and discover the osd dropped the session... which it will). Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 0aea8afaa072..4a8b4f08d4ae 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -294,10 +294,7 @@ __lookup_request_ge(struct ceph_osd_client *osdc, /* - * The messaging layer will reconnect to the osd as needed. If the - * session has dropped, the OSD will have dropped the session state, - * and we'll get notified by the messaging layer. If that happens, we - * need to resubmit all requests for that osd. + * If the osd connection drops, we need to resubmit all requests. */ static void osd_reset(struct ceph_connection *con) { @@ -1301,7 +1298,7 @@ const static struct ceph_connection_operations osd_con_ops = { .put = put_osd_con, .dispatch = dispatch, .alloc_msg = alloc_msg, - .peer_reset = osd_reset, + .fault = osd_reset, .alloc_middle = ceph_alloc_middle, .prepare_pages = prepare_pages, }; -- cgit v1.2.3 From 266673db423e6ab247170d6c6d72ec36e530a911 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 9 Oct 2009 10:31:32 -0700 Subject: ceph: cancel osd requests before resending them This ensures we don't submit the same request twice if we are kicking a specific osd (as with an osd_reset), or when we hit a transient error and resend. 
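
In kick_requests() that just means calling __cancel_request() before
jumping to the resend path, e.g. for the targeted-osd case:

	if (req->r_osd && kickosd == req->r_osd) {
		__cancel_request(req);
		goto kick;
	}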
Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 4a8b4f08d4ae..8e33928647f4 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -813,10 +813,13 @@ static void kick_requests(struct ceph_osd_client *osdc, if (req->r_resend) { dout(" r_resend set on tid %llu\n", req->r_tid); + __cancel_request(req); goto kick; } - if (req->r_osd && kickosd == req->r_osd) + if (req->r_osd && kickosd == req->r_osd) { + __cancel_request(req); goto kick; + } err = __map_osds(osdc, req); if (err == 0) -- cgit v1.2.3 From 13e38c8ae771d73bf6d1f0f98e35f99c0f0d48ff Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 9 Oct 2009 16:36:34 -0700 Subject: ceph: update to mon client protocol v15 The mon request headers now include session_mon information that must be properly initialized. Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.h | 19 ++++++++++++------- fs/ceph/messenger.c | 2 +- fs/ceph/mon_client.c | 8 ++++++-- 3 files changed, 19 insertions(+), 10 deletions(-) diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index 21ed51b127f2..acf24c6944c7 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -37,10 +37,10 @@ */ #define CEPH_OSD_PROTOCOL 7 /* cluster internal */ #define CEPH_MDS_PROTOCOL 9 /* cluster internal */ -#define CEPH_MON_PROTOCOL 4 /* cluster internal */ +#define CEPH_MON_PROTOCOL 5 /* cluster internal */ #define CEPH_OSDC_PROTOCOL 20 /* server/client */ #define CEPH_MDSC_PROTOCOL 29 /* server/client */ -#define CEPH_MONC_PROTOCOL 14 /* server/client */ +#define CEPH_MONC_PROTOCOL 15 /* server/client */ #define CEPH_INO_ROOT 1 @@ -118,9 +118,14 @@ struct ceph_file_layout { #define CEPH_MSG_OSD_OP 42 #define CEPH_MSG_OSD_OPREPLY 43 +struct ceph_mon_request_header { + __le64 have_version; + __le16 session_mon; + __le64 session_mon_tid; +} __attribute__ ((packed)); struct ceph_mon_statfs { - __le64 have_version; + struct ceph_mon_request_header monhdr; struct ceph_fsid fsid; __le64 tid; } __attribute__ ((packed)); @@ -138,22 +143,22 @@ struct ceph_mon_statfs_reply { } __attribute__ ((packed)); struct ceph_osd_getmap { - __le64 have_version; + struct ceph_mon_request_header monhdr; struct ceph_fsid fsid; __le32 start; } __attribute__ ((packed)); struct ceph_mds_getmap { - __le64 have_version; + struct ceph_mon_request_header monhdr; struct ceph_fsid fsid; } __attribute__ ((packed)); struct ceph_client_mount { - __le64 have_version; + struct ceph_mon_request_header monhdr; } __attribute__ ((packed)); struct ceph_mon_subscribe_item { - __le64 have; + __le64 have_version; __le64 have; __u8 onetime; } __attribute__ ((packed)); diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 63f7f1359385..b48abc0b3be7 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -857,7 +857,7 @@ out: static int verify_hello(struct ceph_connection *con) { if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) { - pr_err("connect to/from %s has bad banner\n", + pr_err("connect to %s got bad banner\n", pr_addr(&con->peer_addr.in_addr)); con->error_msg = "protocol error, bad banner"; return -1; diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index 9c34df17fa4b..dc698caccc42 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -273,7 +273,9 @@ static void __request_mount(struct ceph_mon_client *monc) if (IS_ERR(msg)) return; h = msg->front.iov_base; - h->have_version = 0; + h->monhdr.have_version = 0; + h->monhdr.session_mon = cpu_to_le16(-1); + 
h->monhdr.session_mon_tid = 0; ceph_con_send(monc->con, msg); } @@ -422,7 +424,9 @@ static int send_statfs(struct ceph_mon_client *monc, return PTR_ERR(msg); req->request = msg; h = msg->front.iov_base; - h->have_version = 0; + h->monhdr.have_version = 0; + h->monhdr.session_mon = cpu_to_le16(-1); + h->monhdr.session_mon_tid = 0; h->fsid = monc->monmap->fsid; h->tid = cpu_to_le64(req->tid); ceph_con_send(monc->con, msg); -- cgit v1.2.3 From 752727a1b21a462d6ef634d552f180ae692f8947 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 9 Oct 2009 16:38:45 -0700 Subject: ceph: add file layout validation This tracks updates to code shared with userspace. Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.c | 24 ++++++++++++++++++++++++ fs/ceph/ceph_fs.h | 2 ++ 2 files changed, 26 insertions(+) diff --git a/fs/ceph/ceph_fs.c b/fs/ceph/ceph_fs.c index 9371ff1c0002..a950b4083577 100644 --- a/fs/ceph/ceph_fs.c +++ b/fs/ceph/ceph_fs.c @@ -3,6 +3,30 @@ */ #include "types.h" +/* + * return true if @layout appears to be valid + */ +int ceph_file_layout_is_valid(const struct ceph_file_layout *layout) +{ + __u32 su = le32_to_cpu(layout->fl_stripe_unit); + __u32 sc = le32_to_cpu(layout->fl_stripe_count); + __u32 os = le32_to_cpu(layout->fl_object_size); + + /* stripe unit, object size must be non-zero, 64k increment */ + if (!su || (su & (CEPH_MIN_STRIPE_UNIT-1))) + return 0; + if (!os || (os & (CEPH_MIN_STRIPE_UNIT-1))) + return 0; + /* object size must be a multiple of stripe unit */ + if (os < su || os % su) + return 0; + /* stripe count must be non-zero */ + if (!sc) + return 0; + return 1; +} + + int ceph_flags_to_mode(int flags) { #ifdef O_DIRECTORY /* fixme */ diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index acf24c6944c7..b3bbab1952c9 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -72,7 +72,9 @@ struct ceph_file_layout { __le32 fl_pg_pool; /* namespace, crush ruleset, rep level */ } __attribute__ ((packed)); +#define CEPH_MIN_STRIPE_UNIT 65536 +int ceph_file_layout_is_valid(const struct ceph_file_layout *layout); /********************************************* -- cgit v1.2.3 From 8fc57da4d32767cc6096ecaed24636dabefd1dbc Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 12 Oct 2009 10:28:13 -0700 Subject: ceph: ignore trailing data in monamp This lets us extend the format more easily. 
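
The decoder now simply stops treating trailing bytes as an error: the

	if (p != end)
		goto bad;

check after copying the mon_inst array is removed, so later monmap
encodings can append fields without breaking this client.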
Signed-off-by: Sage Weil --- fs/ceph/mon_client.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index dc698caccc42..d6c8e783467e 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -60,9 +60,6 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end) m->num_mon = num_mon; ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0])); - if (p != end) - goto bad; - dout("monmap_decode epoch %d, num_mon %d\n", m->epoch, m->num_mon); for (i = 0; i < m->num_mon; i++) -- cgit v1.2.3 From 572033069dbc2cff8d4a2d2b34c576e1813fda70 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 9 Oct 2009 21:52:34 -0700 Subject: ceph: remove unused CEPH_MSG_{OSD,MDS}_GETMAP Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.h | 2 -- fs/ceph/super.c | 2 -- 2 files changed, 4 deletions(-) diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index b3bbab1952c9..56af192cb430 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -100,7 +100,6 @@ int ceph_file_layout_is_valid(const struct ceph_file_layout *layout); #define CEPH_MSG_MON_SUBSCRIBE_ACK 16 /* client <-> mds */ -#define CEPH_MSG_MDS_GETMAP 20 #define CEPH_MSG_MDS_MAP 21 #define CEPH_MSG_CLIENT_SESSION 22 @@ -115,7 +114,6 @@ int ceph_file_layout_is_valid(const struct ceph_file_layout *layout); #define CEPH_MSG_CLIENT_CAPRELEASE 0x313 /* osd */ -#define CEPH_MSG_OSD_GETMAP 40 #define CEPH_MSG_OSD_MAP 41 #define CEPH_MSG_OSD_OP 42 #define CEPH_MSG_OSD_OPREPLY 43 diff --git a/fs/ceph/super.c b/fs/ceph/super.c index b3404a319c22..442a9900317e 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -231,7 +231,6 @@ const char *ceph_msg_type_name(int type) case CEPH_MSG_CLIENT_MOUNT_ACK: return "client_mount_ack"; case CEPH_MSG_STATFS: return "statfs"; case CEPH_MSG_STATFS_REPLY: return "statfs_reply"; - case CEPH_MSG_MDS_GETMAP: return "mds_getmap"; case CEPH_MSG_MDS_MAP: return "mds_map"; case CEPH_MSG_CLIENT_SESSION: return "client_session"; case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect"; @@ -242,7 +241,6 @@ const char *ceph_msg_type_name(int type) case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release"; case CEPH_MSG_CLIENT_SNAP: return "client_snap"; case CEPH_MSG_CLIENT_LEASE: return "client_lease"; - case CEPH_MSG_OSD_GETMAP: return "osd_getmap"; case CEPH_MSG_OSD_MAP: return "osd_map"; case CEPH_MSG_OSD_OP: return "osd_op"; case CEPH_MSG_OSD_OPREPLY: return "osd_opreply"; -- cgit v1.2.3 From 535bbb530764b1b2b3b732837f0e61e1baae7109 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 13 Oct 2009 12:55:26 -0700 Subject: ceph: add version field to message header This makes it easier for individual message types to indicate their particular encoding, and make future changes backward compatible. Signed-off-by: Sage Weil --- fs/ceph/msgr.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/ceph/msgr.h b/fs/ceph/msgr.h index 73921ae43faa..9abc879e25b1 100644 --- a/fs/ceph/msgr.h +++ b/fs/ceph/msgr.h @@ -21,7 +21,7 @@ * whenever the wire protocol changes. try to keep this string length * constant. */ -#define CEPH_BANNER "ceph v021" +#define CEPH_BANNER "ceph v022" #define CEPH_BANNER_MAX_LEN 30 @@ -125,6 +125,7 @@ struct ceph_msg_header { __le64 seq; /* message seq# for this session */ __le16 type; /* message type */ __le16 priority; /* priority. 
higher value == higher priority */ + __le16 version; /* version of message encoding */ __le32 front_len; /* bytes in main payload */ __le32 middle_len;/* bytes in middle payload */ -- cgit v1.2.3 From c89136ea4253c73e89e97f5138bb22d97ad9f564 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 14 Oct 2009 09:59:09 -0700 Subject: ceph: convert encode/decode macros to inlines This avoids the fugly pass by reference and makes the code a bit easier to read. Signed-off-by: Sage Weil --- fs/ceph/decode.h | 129 ++++++++++++++++++++++++++------------------------- fs/ceph/mds_client.c | 20 ++++---- fs/ceph/mdsmap.c | 38 +++++++-------- fs/ceph/mon_client.c | 4 +- fs/ceph/osd_client.c | 8 ++-- fs/ceph/osdmap.c | 70 ++++++++++++++-------------- 6 files changed, 137 insertions(+), 132 deletions(-) diff --git a/fs/ceph/decode.h b/fs/ceph/decode.h index fc2769df062d..91179fb2cc3f 100644 --- a/fs/ceph/decode.h +++ b/fs/ceph/decode.h @@ -3,12 +3,44 @@ #include +#include "types.h" + /* * in all cases, * void **p pointer to position pointer * void *end pointer to end of buffer (last byte + 1) */ +static inline u64 ceph_decode_64(void **p) +{ + u64 v = get_unaligned_le64(*p); + *p += sizeof(u64); + return v; +} +static inline u32 ceph_decode_32(void **p) +{ + u32 v = get_unaligned_le32(*p); + *p += sizeof(u32); + return v; +} +static inline u16 ceph_decode_16(void **p) +{ + u16 v = get_unaligned_le16(*p); + *p += sizeof(u16); + return v; +} +static inline u8 ceph_decode_8(void **p) +{ + u8 v = *(u8 *)*p; + (*p)++; + return v; +} +static inline void ceph_decode_copy(void **p, void *pv, size_t n) +{ + memcpy(pv, *p, n); + *p += n; +} + /* * bounds check input. */ @@ -18,48 +50,20 @@ goto bad; \ } while (0) -#define ceph_decode_64(p, v) \ - do { \ - v = get_unaligned_le64(*(p)); \ - *(p) += sizeof(u64); \ - } while (0) -#define ceph_decode_32(p, v) \ - do { \ - v = get_unaligned_le32(*(p)); \ - *(p) += sizeof(u32); \ - } while (0) -#define ceph_decode_16(p, v) \ - do { \ - v = get_unaligned_le16(*(p)); \ - *(p) += sizeof(u16); \ - } while (0) -#define ceph_decode_8(p, v) \ - do { \ - v = *(u8 *)*(p); \ - (*p)++; \ - } while (0) - -#define ceph_decode_copy(p, pv, n) \ - do { \ - memcpy(pv, *(p), n); \ - *(p) += n; \ - } while (0) - -/* bounds check too */ #define ceph_decode_64_safe(p, end, v, bad) \ do { \ ceph_decode_need(p, end, sizeof(u64), bad); \ - ceph_decode_64(p, v); \ + v = ceph_decode_64(p); \ } while (0) #define ceph_decode_32_safe(p, end, v, bad) \ do { \ ceph_decode_need(p, end, sizeof(u32), bad); \ - ceph_decode_32(p, v); \ + v = ceph_decode_32(p); \ } while (0) #define ceph_decode_16_safe(p, end, v, bad) \ do { \ ceph_decode_need(p, end, sizeof(u16), bad); \ - ceph_decode_16(p, v); \ + v = ceph_decode_16(p); \ } while (0) #define ceph_decode_copy_safe(p, end, pv, n, bad) \ @@ -71,41 +75,42 @@ /* * struct ceph_timespec <-> struct timespec */ -#define ceph_decode_timespec(ts, tv) \ - do { \ - (ts)->tv_sec = le32_to_cpu((tv)->tv_sec); \ - (ts)->tv_nsec = le32_to_cpu((tv)->tv_nsec); \ - } while (0) -#define ceph_encode_timespec(tv, ts) \ - do { \ - (tv)->tv_sec = cpu_to_le32((ts)->tv_sec); \ - (tv)->tv_nsec = cpu_to_le32((ts)->tv_nsec); \ - } while (0) - +static inline void ceph_decode_timespec(struct timespec *ts, + struct ceph_timespec *tv) +{ + ts->tv_sec = le32_to_cpu(tv->tv_sec); + ts->tv_nsec = le32_to_cpu(tv->tv_nsec); +} +static inline void ceph_encode_timespec(struct ceph_timespec *tv, + struct timespec *ts) +{ + tv->tv_sec = cpu_to_le32(ts->tv_sec); + tv->tv_nsec = cpu_to_le32(ts->tv_nsec); 
+} /* * encoders */ -#define ceph_encode_64(p, v) \ - do { \ - put_unaligned_le64(v, (__le64 *)*(p)); \ - *(p) += sizeof(u64); \ - } while (0) -#define ceph_encode_32(p, v) \ - do { \ - put_unaligned_le32(v, (__le32 *)*(p)); \ - *(p) += sizeof(u32); \ - } while (0) -#define ceph_encode_16(p, v) \ - do { \ - put_unaligned_le16(v), (__le16 *)*(p)); \ - *(p) += sizeof(u16); \ - } while (0) -#define ceph_encode_8(p, v) \ - do { \ - *(u8 *)*(p) = v; \ - (*(p))++; \ - } while (0) +static inline void ceph_encode_64(void **p, u64 v) +{ + put_unaligned_le64(v, (__le64 *)*p); + *p += sizeof(u64); +} +static inline void ceph_encode_32(void **p, u32 v) +{ + put_unaligned_le32(v, (__le32 *)*p); + *p += sizeof(u32); +} +static inline void ceph_encode_16(void **p, u16 v) +{ + put_unaligned_le16(v, (__le16 *)*p); + *p += sizeof(u16); +} +static inline void ceph_encode_8(void **p, u8 v) +{ + *(u8 *)*p = v; + (*p)++; +} /* * filepath, string encoders diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index de8ba4a242ca..2b19da31a8b3 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -136,9 +136,9 @@ static int parse_reply_info_dir(void **p, void *end, goto bad; ceph_decode_need(p, end, sizeof(num) + 2, bad); - ceph_decode_32(p, num); - ceph_decode_8(p, info->dir_end); - ceph_decode_8(p, info->dir_complete); + num = ceph_decode_32(p); + info->dir_end = ceph_decode_8(p); + info->dir_complete = ceph_decode_8(p); if (num == 0) goto done; @@ -160,7 +160,7 @@ static int parse_reply_info_dir(void **p, void *end, while (num) { /* dentry */ ceph_decode_need(p, end, sizeof(u32)*2, bad); - ceph_decode_32(p, info->dir_dname_len[i]); + info->dir_dname_len[i] = ceph_decode_32(p); ceph_decode_need(p, end, info->dir_dname_len[i], bad); info->dir_dname[i] = *p; *p += info->dir_dname_len[i]; @@ -1791,10 +1791,10 @@ static void handle_forward(struct ceph_mds_client *mdsc, struct ceph_msg *msg) from_mds = le64_to_cpu(msg->hdr.src.name.num); ceph_decode_need(&p, end, sizeof(u64)+2*sizeof(u32), bad); - ceph_decode_64(&p, tid); - ceph_decode_32(&p, next_mds); - ceph_decode_32(&p, fwd_seq); - ceph_decode_8(&p, must_resend); + tid = ceph_decode_64(&p); + next_mds = ceph_decode_32(&p); + fwd_seq = ceph_decode_32(&p); + must_resend = ceph_decode_8(&p); WARN_ON(must_resend); /* shouldn't happen. */ @@ -2783,8 +2783,8 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) pr_err("got mdsmap with wrong fsid\n"); return; } - ceph_decode_32(&p, epoch); - ceph_decode_32(&p, maplen); + epoch = ceph_decode_32(&p); + maplen = ceph_decode_32(&p); dout("handle_map epoch %u len %d\n", epoch, (int)maplen); /* do we need it? 
*/ diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c index 09180d8fafe4..80daea064470 100644 --- a/fs/ceph/mdsmap.c +++ b/fs/ceph/mdsmap.c @@ -60,21 +60,21 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) ceph_decode_16_safe(p, end, version, bad); ceph_decode_need(p, end, 8*sizeof(u32) + sizeof(u64), bad); - ceph_decode_32(p, m->m_epoch); - ceph_decode_32(p, m->m_client_epoch); - ceph_decode_32(p, m->m_last_failure); - ceph_decode_32(p, m->m_root); - ceph_decode_32(p, m->m_session_timeout); - ceph_decode_32(p, m->m_session_autoclose); - ceph_decode_64(p, m->m_max_file_size); - ceph_decode_32(p, m->m_max_mds); + m->m_epoch = ceph_decode_32(p); + m->m_client_epoch = ceph_decode_32(p); + m->m_last_failure = ceph_decode_32(p); + m->m_root = ceph_decode_32(p); + m->m_session_timeout = ceph_decode_32(p); + m->m_session_autoclose = ceph_decode_32(p); + m->m_max_file_size = ceph_decode_64(p); + m->m_max_mds = ceph_decode_32(p); m->m_info = kcalloc(m->m_max_mds, sizeof(*m->m_info), GFP_NOFS); if (m->m_info == NULL) goto badmem; /* pick out active nodes from mds_info (state > 0) */ - ceph_decode_32(p, n); + n = ceph_decode_32(p); for (i = 0; i < n; i++) { u32 namelen; s32 mds, inc, state; @@ -86,18 +86,18 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) ceph_decode_need(p, end, sizeof(addr) + 1 + sizeof(u32), bad); ceph_decode_copy(p, &addr, sizeof(addr)); - ceph_decode_8(p, infoversion); - ceph_decode_32(p, namelen); /* skip mds name */ + infoversion = ceph_decode_8(p); + namelen = ceph_decode_32(p); /* skip mds name */ *p += namelen; ceph_decode_need(p, end, 4*sizeof(u32) + sizeof(u64) + sizeof(addr) + sizeof(struct ceph_timespec), bad); - ceph_decode_32(p, mds); - ceph_decode_32(p, inc); - ceph_decode_32(p, state); - ceph_decode_64(p, state_seq); + mds = ceph_decode_32(p); + inc = ceph_decode_32(p); + state = ceph_decode_32(p); + state_seq = ceph_decode_64(p); *p += sizeof(addr); *p += sizeof(struct ceph_timespec); *p += sizeof(u32); @@ -123,8 +123,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) kcalloc(num_export_targets, sizeof(u32), GFP_NOFS); for (j = 0; j < num_export_targets; j++) - ceph_decode_32(&pexport_targets, - m->m_info[mds].export_targets[j]); + m->m_info[mds].export_targets[j] = + ceph_decode_32(&pexport_targets); } else { m->m_info[mds].export_targets = NULL; } @@ -139,8 +139,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) goto badmem; ceph_decode_need(p, end, sizeof(u32)*(n+1), bad); for (i = 0; i < n; i++) - ceph_decode_32(p, m->m_data_pg_pools[i]); - ceph_decode_32(p, m->m_cas_pg_pool); + m->m_data_pg_pools[i] = ceph_decode_32(p); + m->m_cas_pg_pool = ceph_decode_32(p); /* ok, we don't care about the rest. 
*/ dout("mdsmap_decode success epoch %u\n", m->m_epoch); diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index d6c8e783467e..bea2be9077e4 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -45,9 +45,9 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end) ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad); ceph_decode_copy(&p, &fsid, sizeof(fsid)); - ceph_decode_32(&p, epoch); + epoch = ceph_decode_32(&p); - ceph_decode_32(&p, num_mon); + num_mon = ceph_decode_32(&p); ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad); if (num_mon >= CEPH_MAX_MON) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 8e33928647f4..bbd9a5d23712 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -894,8 +894,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) dout(" %d inc maps\n", nr_maps); while (nr_maps > 0) { ceph_decode_need(&p, end, 2*sizeof(u32), bad); - ceph_decode_32(&p, epoch); - ceph_decode_32(&p, maplen); + epoch = ceph_decode_32(&p); + maplen = ceph_decode_32(&p); ceph_decode_need(&p, end, maplen, bad); next = p + maplen; if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) { @@ -927,8 +927,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) dout(" %d full maps\n", nr_maps); while (nr_maps) { ceph_decode_need(&p, end, 2*sizeof(u32), bad); - ceph_decode_32(&p, epoch); - ceph_decode_32(&p, maplen); + epoch = ceph_decode_32(&p); + maplen = ceph_decode_32(&p); ceph_decode_need(&p, end, maplen, bad); if (nr_maps > 1) { dout("skipping non-latest full map %u len %d\n", diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 342e5f80996b..6f0aeff4185a 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -67,7 +67,7 @@ static int crush_decode_uniform_bucket(void **p, void *end, { dout("crush_decode_uniform_bucket %p to %p\n", *p, end); ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad); - ceph_decode_32(p, b->item_weight); + b->item_weight = ceph_decode_32(p); return 0; bad: return -EINVAL; @@ -86,8 +86,8 @@ static int crush_decode_list_bucket(void **p, void *end, return -ENOMEM; ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); for (j = 0; j < b->h.size; j++) { - ceph_decode_32(p, b->item_weights[j]); - ceph_decode_32(p, b->sum_weights[j]); + b->item_weights[j] = ceph_decode_32(p); + b->sum_weights[j] = ceph_decode_32(p); } return 0; bad: @@ -105,7 +105,7 @@ static int crush_decode_tree_bucket(void **p, void *end, return -ENOMEM; ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad); for (j = 0; j < b->num_nodes; j++) - ceph_decode_32(p, b->node_weights[j]); + b->node_weights[j] = ceph_decode_32(p); return 0; bad: return -EINVAL; @@ -124,8 +124,8 @@ static int crush_decode_straw_bucket(void **p, void *end, return -ENOMEM; ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad); for (j = 0; j < b->h.size; j++) { - ceph_decode_32(p, b->item_weights[j]); - ceph_decode_32(p, b->straws[j]); + b->item_weights[j] = ceph_decode_32(p); + b->straws[j] = ceph_decode_32(p); } return 0; bad: @@ -148,15 +148,15 @@ static struct crush_map *crush_decode(void *pbyval, void *end) return ERR_PTR(-ENOMEM); ceph_decode_need(p, end, 4*sizeof(u32), bad); - ceph_decode_32(p, magic); + magic = ceph_decode_32(p); if (magic != CRUSH_MAGIC) { pr_err("crush_decode magic %x != current %x\n", (unsigned)magic, (unsigned)CRUSH_MAGIC); goto bad; } - ceph_decode_32(p, c->max_buckets); - ceph_decode_32(p, c->max_rules); - ceph_decode_32(p, c->max_devices); + c->max_buckets = 
ceph_decode_32(p); + c->max_rules = ceph_decode_32(p); + c->max_devices = ceph_decode_32(p); c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS); if (c->device_parents == NULL) @@ -208,11 +208,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end) goto badmem; ceph_decode_need(p, end, 4*sizeof(u32), bad); - ceph_decode_32(p, b->id); - ceph_decode_16(p, b->type); - ceph_decode_16(p, b->alg); - ceph_decode_32(p, b->weight); - ceph_decode_32(p, b->size); + b->id = ceph_decode_32(p); + b->type = ceph_decode_16(p); + b->alg = ceph_decode_16(p); + b->weight = ceph_decode_32(p); + b->size = ceph_decode_32(p); dout("crush_decode bucket size %d off %x %p to %p\n", b->size, (int)(*p-start), *p, end); @@ -227,7 +227,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end) ceph_decode_need(p, end, b->size*sizeof(u32), bad); for (j = 0; j < b->size; j++) - ceph_decode_32(p, b->items[j]); + b->items[j] = ceph_decode_32(p); switch (b->alg) { case CRUSH_BUCKET_UNIFORM: @@ -290,9 +290,9 @@ static struct crush_map *crush_decode(void *pbyval, void *end) ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */ ceph_decode_need(p, end, r->len*3*sizeof(u32), bad); for (j = 0; j < r->len; j++) { - ceph_decode_32(p, r->steps[j].op); - ceph_decode_32(p, r->steps[j].arg1); - ceph_decode_32(p, r->steps[j].arg2); + r->steps[j].op = ceph_decode_32(p); + r->steps[j].arg1 = ceph_decode_32(p); + r->steps[j].arg2 = ceph_decode_32(p); } } @@ -411,11 +411,11 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad); ceph_decode_copy(p, &map->fsid, sizeof(map->fsid)); - ceph_decode_32(p, map->epoch); + map->epoch = ceph_decode_32(p); ceph_decode_copy(p, &map->created, sizeof(map->created)); ceph_decode_copy(p, &map->modified, sizeof(map->modified)); - ceph_decode_32(p, map->num_pools); + map->num_pools = ceph_decode_32(p); map->pg_pool = kcalloc(map->num_pools, sizeof(*map->pg_pool), GFP_NOFS); if (!map->pg_pool) { @@ -425,7 +425,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) ceph_decode_32_safe(p, end, max, bad); while (max--) { ceph_decode_need(p, end, 4+sizeof(map->pg_pool->v), bad); - ceph_decode_32(p, i); + i = ceph_decode_32(p); if (i >= map->num_pools) goto bad; ceph_decode_copy(p, &map->pg_pool[i].v, @@ -438,7 +438,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) ceph_decode_32_safe(p, end, map->flags, bad); - ceph_decode_32(p, max); + max = ceph_decode_32(p); /* (re)alloc osd arrays */ err = osdmap_set_max_osd(map, max); @@ -456,7 +456,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) *p += 4; /* skip length field (should match max) */ for (i = 0; i < map->max_osd; i++) - ceph_decode_32(p, map->osd_weight[i]); + map->osd_weight[i] = ceph_decode_32(p); *p += 4; /* skip length field (should match max) */ ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr)); @@ -469,8 +469,8 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) struct ceph_pg_mapping *pg; ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad); - ceph_decode_64(p, pgid); - ceph_decode_32(p, n); + pgid = ceph_decode_64(p); + n = ceph_decode_32(p); ceph_decode_need(p, end, n * sizeof(u32), bad); pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS); if (!pg) { @@ -480,7 +480,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) pg->pgid = pgid; pg->len = n; for (j = 0; j < n; j++) - ceph_decode_32(p, pg->osds[j]); + pg->osds[j] = ceph_decode_32(p); err = __insert_pg_mapping(pg, 
&map->pg_temp); if (err) @@ -537,10 +537,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32), bad); ceph_decode_copy(p, &fsid, sizeof(fsid)); - ceph_decode_32(p, epoch); + epoch = ceph_decode_32(p); BUG_ON(epoch != map->epoch+1); ceph_decode_copy(p, &modified, sizeof(modified)); - ceph_decode_32(p, new_flags); + new_flags = ceph_decode_32(p); /* full map? */ ceph_decode_32_safe(p, end, len, bad); @@ -568,7 +568,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, ceph_decode_need(p, end, 5*sizeof(u32), bad); /* new max? */ - ceph_decode_32(p, max); + max = ceph_decode_32(p); if (max >= 0) { err = osdmap_set_max_osd(map, max); if (err < 0) @@ -641,8 +641,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, while (len--) { u32 osd, off; ceph_decode_need(p, end, sizeof(u32)*2, bad); - ceph_decode_32(p, osd); - ceph_decode_32(p, off); + osd = ceph_decode_32(p); + off = ceph_decode_32(p); pr_info("osd%d weight 0x%x %s\n", osd, off, off == CEPH_OSD_IN ? "(in)" : (off == CEPH_OSD_OUT ? "(out)" : "")); @@ -659,8 +659,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, u64 pgid; u32 pglen; ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad); - ceph_decode_64(p, pgid); - ceph_decode_32(p, pglen); + pgid = ceph_decode_64(p); + pglen = ceph_decode_32(p); /* remove any? */ while (rbp && rb_entry(rbp, struct ceph_pg_mapping, @@ -683,7 +683,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, pg->pgid = pgid; pg->len = pglen; for (j = 0; j < len; j++) - ceph_decode_32(p, pg->osds[j]); + pg->osds[j] = ceph_decode_32(p); err = __insert_pg_mapping(pg, &map->pg_temp); if (err) goto bad; -- cgit v1.2.3 From f2cf418cec8d61df0651a0140a92a8c75246e14f Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 14 Oct 2009 14:09:07 -0700 Subject: ceph: initialize sb->s_bdi, bdi_unregister after kill_anon_super Writeback doesn't work without the bdi set, and writeback on umount doesn't work if we unregister the bdi too early. Signed-off-by: Sage Weil --- fs/ceph/super.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 442a9900317e..7f7d4759a443 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -97,6 +97,7 @@ static int ceph_syncfs(struct super_block *sb, int wait) dout("sync_fs %d\n", wait); ceph_osdc_sync(&ceph_client(sb)->osdc); ceph_mdsc_sync(&ceph_client(sb)->mdsc); + dout("sync_fs %d done\n", wait); return 0; } @@ -777,6 +778,7 @@ static int ceph_init_bdi(struct super_block *sb, struct ceph_client *client) err = bdi_init(&client->backing_dev_info); if (err < 0) return err; + sb->s_bdi = &client->backing_dev_info; /* set ra_pages based on rsize mount option? */ if (client->mount_args.rsize >= PAGE_CACHE_SIZE) @@ -861,8 +863,8 @@ static void ceph_kill_sb(struct super_block *s) struct ceph_client *client = ceph_sb_to_client(s); dout("kill_sb %p\n", s); ceph_mdsc_pre_umount(&client->mdsc); - bdi_unregister(&client->backing_dev_info); kill_anon_super(s); /* will call put_super after sb is r/o */ + bdi_unregister(&client->backing_dev_info); bdi_destroy(&client->backing_dev_info); ceph_destroy_client(client); } -- cgit v1.2.3 From cdc35f96277314bbfeefd0505410cabd69aebd8d Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 14 Oct 2009 14:24:19 -0700 Subject: ceph: move generic flushing code into helper Both callers of __mark_caps_flushing() do the same work; move it into the helper. 
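
As a rough illustration of the shape of the refactor (a simplified user-space model, not the kernel code; the struct and all names below are made up), the helper now performs the dirty -> flushing transition itself and hands the flushed mask back, so each call site collapses to a single assignment:

    #include <assert.h>
    #include <stdio.h>

    struct fake_inode {
        int dirty_caps;      /* caps with dirty metadata */
        int flushing_caps;   /* caps currently being written back */
    };

    static int mark_caps_flushing(struct fake_inode *ci)
    {
        int flushing = ci->dirty_caps;

        assert(flushing != 0);          /* callers only flush dirty inodes */
        ci->flushing_caps |= flushing;  /* move the bits over ... */
        ci->dirty_caps = 0;             /* ... and clear the dirty set */
        return flushing;                /* callers just record the result */
    }

    int main(void)
    {
        struct fake_inode ci = { .dirty_caps = 0x5, .flushing_caps = 0 };
        int flushing = mark_caps_flushing(&ci);  /* what both call sites do now */

        printf("flushing %#x, dirty %#x\n", flushing, ci.dirty_caps);
        return 0;
    }
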
Signed-off-by: Sage Weil --- fs/ceph/caps.c | 43 +++++++++++++++++++++---------------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 5c7d0e9bbb7b..111439d883d2 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1272,16 +1272,30 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci) /* * Add dirty inode to the flushing list. Assigned a seq number so we * can wait for caps to flush without starving. + * + * Called under i_lock. */ -static void __mark_caps_flushing(struct inode *inode, +static int __mark_caps_flushing(struct inode *inode, struct ceph_mds_session *session) { struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc; struct ceph_inode_info *ci = ceph_inode(inode); - + int flushing; + + BUG_ON(ci->i_dirty_caps == 0); BUG_ON(list_empty(&ci->i_dirty_item)); + + flushing = ci->i_dirty_caps; + dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n", + ceph_cap_string(flushing), + ceph_cap_string(ci->i_flushing_caps), + ceph_cap_string(ci->i_flushing_caps | flushing)); + ci->i_flushing_caps |= flushing; + ci->i_dirty_caps = 0; + spin_lock(&mdsc->cap_dirty_lock); if (list_empty(&ci->i_flushing_item)) { + list_del_init(&ci->i_dirty_item); list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing); mdsc->num_cap_flushing++; ci->i_cap_flush_seq = ++mdsc->cap_flush_seq; @@ -1289,6 +1303,8 @@ static void __mark_caps_flushing(struct inode *inode, ci->i_cap_flush_seq); } spin_unlock(&mdsc->cap_dirty_lock); + + return flushing; } /* @@ -1504,17 +1520,8 @@ ack: took_snap_rwsem = 1; } - if (cap == ci->i_auth_cap && ci->i_dirty_caps) { - /* update dirty, flushing bits */ - flushing = ci->i_dirty_caps; - dout(" flushing %s, flushing_caps %s -> %s\n", - ceph_cap_string(flushing), - ceph_cap_string(ci->i_flushing_caps), - ceph_cap_string(ci->i_flushing_caps | flushing)); - ci->i_flushing_caps |= flushing; - ci->i_dirty_caps = 0; - __mark_caps_flushing(inode, session); - } + if (cap == ci->i_auth_cap && ci->i_dirty_caps) + flushing = __mark_caps_flushing(inode, session); mds = cap->mds; /* remember mds, so we don't repeat */ sent++; @@ -1605,15 +1612,7 @@ retry: if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) goto out; - __mark_caps_flushing(inode, session); - - flushing = ci->i_dirty_caps; - dout(" flushing %s, flushing_caps %s -> %s\n", - ceph_cap_string(flushing), - ceph_cap_string(ci->i_flushing_caps), - ceph_cap_string(ci->i_flushing_caps | flushing)); - ci->i_flushing_caps |= flushing; - ci->i_dirty_caps = 0; + flushing = __mark_caps_flushing(inode, session); /* __send_cap drops i_lock */ delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want, -- cgit v1.2.3 From afcdaea3f2a78ce4873bd7e98a6d603bda23d167 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 14 Oct 2009 14:27:38 -0700 Subject: ceph: flush dirty caps via the cap_dirty list Previously we were flushing dirty caps by passing an extra flag when traversing the delayed caps list. Besides being a bit ugly, that can also miss caps that are dirty but didn't result in a cap requeue: notably, mark_caps_dirty(). Separate the flushing into a separate helper, and traverse the cap_dirty list. This also brings i_dirty_item in line with i_dirty_caps: we are on the list IFF caps != 0. We carry an inode ref IFF dirty_caps|flushing_caps != 0. Lose the unused return value from __ceph_mark_caps_dirty(). 
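
A minimal sketch of the loop structure this introduces for walking the global dirty list: pin each inode with a reference, drop the list lock for the expensive flush, then re-take it for the next entry. This is a self-contained user-space model with invented names; in the real code the flush itself moves the inode off the list, whereas the toy version below simply pops entries to stay compact.

    #include <pthread.h>
    #include <stdio.h>

    struct fake_inode {
        struct fake_inode *next;   /* linkage on the dirty list */
        int id;
        int refcount;
    };

    static pthread_mutex_t dirty_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct fake_inode *dirty_list;

    static void flush_one(struct fake_inode *inode)
    {
        printf("flushing inode %d\n", inode->id);  /* stands in for the cap flush */
    }

    static void flush_dirty(void)
    {
        pthread_mutex_lock(&dirty_lock);
        while (dirty_list) {
            struct fake_inode *inode = dirty_list;

            dirty_list = inode->next;      /* toy version: just pop the head */
            inode->refcount++;             /* pin it (igrab() in the kernel) */
            pthread_mutex_unlock(&dirty_lock);

            flush_one(inode);              /* heavy work done without the lock */
            inode->refcount--;             /* drop the pin (iput()) */

            pthread_mutex_lock(&dirty_lock);
        }
        pthread_mutex_unlock(&dirty_lock);
    }

    int main(void)
    {
        struct fake_inode b = { NULL, 2, 0 };
        struct fake_inode a = { &b, 1, 0 };

        dirty_list = &a;
        flush_dirty();
        return 0;
    }
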
Signed-off-by: Sage Weil --- fs/ceph/caps.c | 76 ++++++++++++++++++++++++++++++++++++---------------- fs/ceph/mds_client.c | 6 ++--- fs/ceph/super.h | 6 ++--- 3 files changed, 59 insertions(+), 29 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 111439d883d2..40b8d3471244 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -997,7 +997,7 @@ void ceph_queue_caps_release(struct inode *inode) if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) { dout(" release msg %p full\n", msg); list_move_tail(&msg->list_head, - &session->s_cap_releases_done); + &session->s_cap_releases_done); } else { dout(" release msg %p at %d/%d (%d)\n", msg, (int)le32_to_cpu(head->num), @@ -1292,14 +1292,20 @@ static int __mark_caps_flushing(struct inode *inode, ceph_cap_string(ci->i_flushing_caps | flushing)); ci->i_flushing_caps |= flushing; ci->i_dirty_caps = 0; + dout(" inode %p now !dirty\n", inode); spin_lock(&mdsc->cap_dirty_lock); + list_del_init(&ci->i_dirty_item); + + ci->i_cap_flush_seq = ++mdsc->cap_flush_seq; if (list_empty(&ci->i_flushing_item)) { - list_del_init(&ci->i_dirty_item); list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing); mdsc->num_cap_flushing++; - ci->i_cap_flush_seq = ++mdsc->cap_flush_seq; - dout(" inode %p now flushing seq %lld\n", &ci->vfs_inode, + dout(" inode %p now flushing seq %lld\n", inode, + ci->i_cap_flush_seq); + } else { + list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing); + dout(" inode %p now flushing (more) seq %lld\n", inode, ci->i_cap_flush_seq); } spin_unlock(&mdsc->cap_dirty_lock); @@ -1555,32 +1561,33 @@ ack: * Mark caps dirty. If inode is newly dirty, add to the global dirty * list. */ -int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) +void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) { struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc; struct inode *inode = &ci->vfs_inode; - int was = __ceph_caps_dirty(ci); + int was_dirty = ci->i_dirty_caps; int dirty = 0; dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode, ceph_cap_string(mask), ceph_cap_string(ci->i_dirty_caps), ceph_cap_string(ci->i_dirty_caps | mask)); ci->i_dirty_caps |= mask; - if (!was) { + if (!was_dirty) { dout(" inode %p now dirty\n", &ci->vfs_inode); spin_lock(&mdsc->cap_dirty_lock); list_add(&ci->i_dirty_item, &mdsc->cap_dirty); spin_unlock(&mdsc->cap_dirty_lock); - igrab(inode); - dirty |= I_DIRTY_SYNC; + if (ci->i_flushing_caps == 0) { + igrab(inode); + dirty |= I_DIRTY_SYNC; + } } - if ((was & CEPH_CAP_FILE_BUFFER) && + if (((was_dirty | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) && (mask & CEPH_CAP_FILE_BUFFER)) dirty |= I_DIRTY_DATASYNC; if (dirty) __mark_inode_dirty(inode, dirty); __cap_delay_requeue(mdsc, ci); - return was; } /* @@ -2327,7 +2334,7 @@ static void handle_cap_flush_ack(struct inode *inode, int dirty = le32_to_cpu(m->dirty); int cleaned = 0; u64 flush_tid = le64_to_cpu(m->client_tid); - int old_dirty = 0, new_dirty = 0; + int drop = 0; int i; for (i = 0; i < CEPH_CAP_BITS; i++) @@ -2344,9 +2351,7 @@ static void handle_cap_flush_ack(struct inode *inode, if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned)) goto out; - old_dirty = ci->i_dirty_caps | ci->i_flushing_caps; ci->i_flushing_caps &= ~cleaned; - new_dirty = ci->i_dirty_caps | ci->i_flushing_caps; spin_lock(&mdsc->cap_dirty_lock); if (ci->i_flushing_caps == 0) { @@ -2360,17 +2365,19 @@ static void handle_cap_flush_ack(struct inode *inode, mdsc->num_cap_flushing--; wake_up(&mdsc->cap_flushing_wq); dout(" inode %p now 
!flushing\n", inode); - } - if (old_dirty && !new_dirty) { - dout(" inode %p now clean\n", inode); - list_del_init(&ci->i_dirty_item); + + if (ci->i_dirty_caps == 0) { + dout(" inode %p now clean\n", inode); + BUG_ON(!list_empty(&ci->i_dirty_item)); + drop = 1; + } } spin_unlock(&mdsc->cap_dirty_lock); wake_up(&ci->i_cap_wq); out: spin_unlock(&inode->i_lock); - if (old_dirty && !new_dirty) + if (drop) iput(inode); } @@ -2676,14 +2683,11 @@ bad: /* * Delayed work handler to process end of delayed cap release LRU list. */ -void ceph_check_delayed_caps(struct ceph_mds_client *mdsc, int flushdirty) +void ceph_check_delayed_caps(struct ceph_mds_client *mdsc) { struct ceph_inode_info *ci; int flags = CHECK_CAPS_NODELAY; - if (flushdirty) - flags |= CHECK_CAPS_FLUSH; - dout("check_delayed_caps\n"); while (1) { spin_lock(&mdsc->cap_delay_lock); @@ -2703,6 +2707,32 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc, int flushdirty) spin_unlock(&mdsc->cap_delay_lock); } +/* + * Flush all dirty caps to the mds + */ +void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc) +{ + struct ceph_inode_info *ci; + struct inode *inode; + + dout("flush_dirty_caps\n"); + spin_lock(&mdsc->cap_dirty_lock); + while (!list_empty(&mdsc->cap_dirty)) { + ci = list_first_entry(&mdsc->cap_dirty, + struct ceph_inode_info, + i_dirty_item); + inode = igrab(&ci->vfs_inode); + spin_unlock(&mdsc->cap_dirty_lock); + if (inode) { + ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, + NULL); + iput(inode); + } + spin_lock(&mdsc->cap_dirty_lock); + } + spin_unlock(&mdsc->cap_dirty_lock); +} + /* * Drop open file reference. If we were the last open file, * we may need to release capabilities to the MDS (or schedule diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 2b19da31a8b3..12d66c0572ac 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2504,7 +2504,7 @@ static void delayed_work(struct work_struct *work) int renew_caps; dout("mdsc delayed_work\n"); - ceph_check_delayed_caps(mdsc, 0); + ceph_check_delayed_caps(mdsc); mutex_lock(&mdsc->mutex); renew_interval = mdsc->mdsmap->m_session_timeout >> 2; @@ -2627,7 +2627,7 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) mdsc->stopping = 1; drop_leases(mdsc); - ceph_check_delayed_caps(mdsc, 1); + ceph_flush_dirty_caps(mdsc); wait_requests(mdsc); } @@ -2677,7 +2677,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc) mutex_unlock(&mdsc->mutex); dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush); - ceph_check_delayed_caps(mdsc, 1); + ceph_flush_dirty_caps(mdsc); wait_unsafe_requests(mdsc, want_tid); wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush)); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index cfd39ef4023e..0bbf58ab607e 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -524,7 +524,7 @@ static inline int __ceph_caps_dirty(struct ceph_inode_info *ci) { return ci->i_dirty_caps | ci->i_flushing_caps; } -extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask); +extern void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask); extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask); extern int __ceph_caps_used(struct ceph_inode_info *ci); @@ -814,8 +814,8 @@ extern void __ceph_flush_snaps(struct ceph_inode_info *ci, struct ceph_mds_session **psession); extern void ceph_check_caps(struct ceph_inode_info *ci, int flags, struct ceph_mds_session *session); -extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc, - int flushdirty); +extern void 
ceph_check_delayed_caps(struct ceph_mds_client *mdsc); +extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc); extern int ceph_encode_inode_release(void **p, struct inode *inode, int mds, int drop, int unless, int force); -- cgit v1.2.3 From 07bd10fb9853a41a7f0bb271721cca97d15eccae Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 14 Oct 2009 17:26:40 -0700 Subject: ceph: correct subscribe_ack msgpool payload size Defined a struct for the SUBSCRIBE_ACK, and use that to size the msgpool. Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.h | 5 +++++ fs/ceph/mon_client.c | 11 +++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index 56af192cb430..9b16e2e06ea6 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -162,6 +162,11 @@ struct ceph_mon_subscribe_item { __u8 onetime; } __attribute__ ((packed)); +struct ceph_mon_subscribe_ack { + __le32 duration; /* seconds */ + struct ceph_fsid fsid; +} __attribute__ ((packed)); + /* * mds states * > 0 -> in diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index bea2be9077e4..d52e52968d01 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -199,10 +199,12 @@ static void handle_subscribe_ack(struct ceph_mon_client *monc, struct ceph_msg *msg) { unsigned seconds; - void *p = msg->front.iov_base; - void *end = p + msg->front.iov_len; + struct ceph_mon_subscribe_ack *h = msg->front.iov_base; + + if (msg->front.iov_len < sizeof(*h)) + goto bad; + seconds = le32_to_cpu(h->duration); - ceph_decode_32_safe(&p, end, seconds, bad); mutex_lock(&monc->mutex); if (monc->hunting) { pr_info("mon%d %s session established\n", @@ -541,7 +543,8 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) err = ceph_msgpool_init(&monc->msgpool_mount_ack, 4096, 1, false); if (err < 0) goto out; - err = ceph_msgpool_init(&monc->msgpool_subscribe_ack, 8, 1, false); + err = ceph_msgpool_init(&monc->msgpool_subscribe_ack, + sizeof(struct ceph_mon_subscribe_ack), 1, false); if (err < 0) goto out; err = ceph_msgpool_init(&monc->msgpool_statfs_reply, -- cgit v1.2.3 From 8f3bc053c610826a657714649ea596f07875db2e Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 14 Oct 2009 17:36:07 -0700 Subject: ceph: warn on allocation from msgpool with larger front_len Pass the front_len we need when pulling a message off a msgpool, and WARN if it is greater than the pool's size. Then try to allocate a new message (to continue without failing). 
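
A small user-space analogue of the fallback behavior described above (pool and function names are invented): if the caller asks for more than the pool was sized for, warn and hand back a one-off allocation instead of failing.

    #include <stdio.h>
    #include <stdlib.h>

    struct buf_pool {
        size_t front_len;                /* size the pool was created for */
    };

    static void *pool_get(struct buf_pool *pool, size_t front_len)
    {
        if (front_len && front_len > pool->front_len) {
            /* analogous to the WARN_ON() added here */
            fprintf(stderr, "pool %p asked for %zu but sized for %zu\n",
                    (void *)pool, front_len, pool->front_len);
            return malloc(front_len);    /* one-off buffer; keep going */
        }
        if (!front_len)
            front_len = pool->front_len; /* default to the pool's size */
        return malloc(front_len);        /* stand-in for the pooled message */
    }

    int main(void)
    {
        struct buf_pool pool = { .front_len = 8 };
        void *ok = pool_get(&pool, 0);
        void *big = pool_get(&pool, 4096);   /* triggers the warning path */

        free(ok);
        free(big);
        return 0;
    }
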
Signed-off-by: Sage Weil --- fs/ceph/mon_client.c | 7 ++++--- fs/ceph/msgpool.c | 20 +++++++++++++++++--- fs/ceph/msgpool.h | 3 ++- fs/ceph/osd_client.c | 5 +++-- 4 files changed, 26 insertions(+), 9 deletions(-) diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index d52e52968d01..e6e954cac6b9 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -639,14 +639,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, { struct ceph_mon_client *monc = con->private; int type = le16_to_cpu(hdr->type); + int front = le32_to_cpu(hdr->front_len); switch (type) { case CEPH_MSG_CLIENT_MOUNT_ACK: - return ceph_msgpool_get(&monc->msgpool_mount_ack); + return ceph_msgpool_get(&monc->msgpool_mount_ack, front); case CEPH_MSG_MON_SUBSCRIBE_ACK: - return ceph_msgpool_get(&monc->msgpool_subscribe_ack); + return ceph_msgpool_get(&monc->msgpool_subscribe_ack, front); case CEPH_MSG_STATFS_REPLY: - return ceph_msgpool_get(&monc->msgpool_statfs_reply); + return ceph_msgpool_get(&monc->msgpool_statfs_reply, front); } return ceph_alloc_msg(con, hdr); } diff --git a/fs/ceph/msgpool.c b/fs/ceph/msgpool.c index 39d4d7ed82ce..7599b3382076 100644 --- a/fs/ceph/msgpool.c +++ b/fs/ceph/msgpool.c @@ -101,14 +101,28 @@ int ceph_msgpool_resv(struct ceph_msgpool *pool, int delta) return ret; } -struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool) +struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len) { wait_queue_t wait; struct ceph_msg *msg; + if (front_len && front_len > pool->front_len) { + pr_err("msgpool_get pool %p need front %d, pool size is %d\n", + pool, front_len, pool->front_len); + WARN_ON(1); + + /* try to alloc a fresh message */ + msg = ceph_msg_new(0, front_len, 0, 0, NULL); + if (!IS_ERR(msg)) + return msg; + } + + if (!front_len) + front_len = pool->front_len; + if (pool->blocking) { /* mempool_t behavior; first try to alloc */ - msg = ceph_msg_new(0, pool->front_len, 0, 0, NULL); + msg = ceph_msg_new(0, front_len, 0, 0, NULL); if (!IS_ERR(msg)) return msg; } @@ -133,7 +147,7 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool) WARN_ON(1); /* maybe we can allocate it now? 
*/ - msg = ceph_msg_new(0, pool->front_len, 0, 0, NULL); + msg = ceph_msg_new(0, front_len, 0, 0, NULL); if (!IS_ERR(msg)) return msg; diff --git a/fs/ceph/msgpool.h b/fs/ceph/msgpool.h index 07a2decaa6d8..bc834bfcd720 100644 --- a/fs/ceph/msgpool.h +++ b/fs/ceph/msgpool.h @@ -20,7 +20,8 @@ extern int ceph_msgpool_init(struct ceph_msgpool *pool, int front_len, int size, bool blocking); extern void ceph_msgpool_destroy(struct ceph_msgpool *pool); extern int ceph_msgpool_resv(struct ceph_msgpool *, int delta); -extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *); +extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *, + int front_len); extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *); #endif diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index bbd9a5d23712..0a254054a82a 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -161,7 +161,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, if (snapc) msg_size += sizeof(u64) * snapc->num_snaps; if (use_mempool) - msg = ceph_msgpool_get(&osdc->msgpool_op); + msg = ceph_msgpool_get(&osdc->msgpool_op, 0); else msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, 0, 0, NULL); if (IS_ERR(msg)) { @@ -1271,10 +1271,11 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con, struct ceph_osd *osd = con->private; struct ceph_osd_client *osdc = osd->o_osdc; int type = le16_to_cpu(hdr->type); + int front = le32_to_cpu(hdr->front_len); switch (type) { case CEPH_MSG_OSD_OPREPLY: - return ceph_msgpool_get(&osdc->msgpool_op_reply); + return ceph_msgpool_get(&osdc->msgpool_op_reply, front); } return ceph_alloc_msg(con, hdr); } -- cgit v1.2.3 From 76e3b390d41db9d69e254a09dd1aedd3e6aac25f Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 15 Oct 2009 18:13:53 -0700 Subject: ceph: move dirty caps code around Cleanup only. Signed-off-by: Sage Weil --- fs/ceph/caps.c | 70 +++++++++++++++++++++++++++++++--------------------------- 1 file changed, 37 insertions(+), 33 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 40b8d3471244..7d166182e98d 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1269,6 +1269,41 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci) spin_unlock(&inode->i_lock); } +/* + * Mark caps dirty. If inode is newly dirty, add to the global dirty + * list. + */ +void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) +{ + struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc; + struct inode *inode = &ci->vfs_inode; + int was = ci->i_dirty_caps; + int dirty = 0; + + dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode, + ceph_cap_string(mask), ceph_cap_string(was), + ceph_cap_string(was | mask)); + ci->i_dirty_caps |= mask; + if (was == 0) { + dout(" inode %p now dirty\n", &ci->vfs_inode); + BUG_ON(!list_empty(&ci->i_dirty_item)); + spin_lock(&mdsc->cap_dirty_lock); + list_add(&ci->i_dirty_item, &mdsc->cap_dirty); + spin_unlock(&mdsc->cap_dirty_lock); + if (ci->i_flushing_caps == 0) { + igrab(inode); + dirty |= I_DIRTY_SYNC; + } + } + BUG_ON(list_empty(&ci->i_dirty_item)); + if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) && + (mask & CEPH_CAP_FILE_BUFFER)) + dirty |= I_DIRTY_DATASYNC; + if (dirty) + __mark_inode_dirty(inode, dirty); + __cap_delay_requeue(mdsc, ci); +} + /* * Add dirty inode to the flushing list. Assigned a seq number so we * can wait for caps to flush without starving. @@ -1557,39 +1592,6 @@ ack: up_read(&mdsc->snap_rwsem); } -/* - * Mark caps dirty. 
If inode is newly dirty, add to the global dirty - * list. - */ -void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) -{ - struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc; - struct inode *inode = &ci->vfs_inode; - int was_dirty = ci->i_dirty_caps; - int dirty = 0; - - dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode, - ceph_cap_string(mask), ceph_cap_string(ci->i_dirty_caps), - ceph_cap_string(ci->i_dirty_caps | mask)); - ci->i_dirty_caps |= mask; - if (!was_dirty) { - dout(" inode %p now dirty\n", &ci->vfs_inode); - spin_lock(&mdsc->cap_dirty_lock); - list_add(&ci->i_dirty_item, &mdsc->cap_dirty); - spin_unlock(&mdsc->cap_dirty_lock); - if (ci->i_flushing_caps == 0) { - igrab(inode); - dirty |= I_DIRTY_SYNC; - } - } - if (((was_dirty | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) && - (mask & CEPH_CAP_FILE_BUFFER)) - dirty |= I_DIRTY_DATASYNC; - if (dirty) - __mark_inode_dirty(inode, dirty); - __cap_delay_requeue(mdsc, ci); -} - /* * Try to flush dirty caps back to the auth mds. */ @@ -2370,6 +2372,8 @@ static void handle_cap_flush_ack(struct inode *inode, dout(" inode %p now clean\n", inode); BUG_ON(!list_empty(&ci->i_dirty_item)); drop = 1; + } else { + BUG_ON(list_empty(&ci->i_dirty_item)); } } spin_unlock(&mdsc->cap_dirty_lock); -- cgit v1.2.3 From 8fa9765576875200a7412a5300b5f0537211f038 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 16 Oct 2009 14:44:35 -0700 Subject: ceph: enable readahead Initialized bdi->ra_pages to enable readahead. Use 512KB default. Signed-off-by: Sage Weil --- fs/ceph/super.c | 1 + fs/ceph/super.h | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 7f7d4759a443..ab950fce4172 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -327,6 +327,7 @@ static int parse_mount_args(struct ceph_client *client, args->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */ args->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT; args->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT; + args->rsize = CEPH_MOUNT_RSIZE_DEFAULT; args->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL); args->cap_release_safety = CEPH_CAPS_PER_RELEASE * 4; args->max_readdir = 1024; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 0bbf58ab607e..75556e97e865 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -69,7 +69,7 @@ struct ceph_mount_args { * defaults */ #define CEPH_MOUNT_TIMEOUT_DEFAULT 60 -#define CEPH_MOUNT_RSIZE_DEFAULT (128*1024) /* readahead */ +#define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */ #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) #define CEPH_MSG_MAX_DATA_LEN (16*1024*1024) -- cgit v1.2.3 From ee7fdfaff7702bd209e3a013b2fc4643233f5465 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 19 Oct 2009 11:41:51 -0700 Subject: ceph: include preferred osd in placement seed Mix the preferred osd (if any) into the placement seed that is fed into the CRUSH object placement calculation. This prevents all the placement pgs from peering with the same osds. Rev the osd client protocol with this change. 
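
To illustrate the seed change in isolation (user-space, with a made-up hash standing in for ceph_full_name_hash()): hash the object name, then fold the preferred osd into the seed only when one is set, so objects pinned to different osds no longer map to the same placement groups.

    #include <stdio.h>

    static unsigned name_hash(const char *s)
    {
        unsigned h = 0;

        while (*s)
            h = h * 31 + (unsigned char)*s++;  /* stand-in for ceph_full_name_hash() */
        return h;
    }

    int main(void)
    {
        const char *oid = "10000000000.00000000";
        int preferred = 3;                 /* -1 would mean "no preference" */
        unsigned ps = name_hash(oid);

        if (preferred >= 0)
            ps += preferred;               /* the mixing this patch adds */
        printf("placement seed %u\n", ps);
        return 0;
    }
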
Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.h | 2 +- fs/ceph/osdmap.c | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index 9b16e2e06ea6..f8f27e28a6bf 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -38,7 +38,7 @@ #define CEPH_OSD_PROTOCOL 7 /* cluster internal */ #define CEPH_MDS_PROTOCOL 9 /* cluster internal */ #define CEPH_MON_PROTOCOL 5 /* cluster internal */ -#define CEPH_OSDC_PROTOCOL 20 /* server/client */ +#define CEPH_OSDC_PROTOCOL 21 /* server/client */ #define CEPH_MDSC_PROTOCOL 29 /* server/client */ #define CEPH_MONC_PROTOCOL 15 /* server/client */ diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 6f0aeff4185a..72d75a239ac2 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -791,6 +791,8 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol, pgid.pg64 = 0; /* start with it zeroed out */ pgid.pg.ps = ceph_full_name_hash(oid, strlen(oid)); pgid.pg.preferred = preferred; + if (preferred >= 0) + pgid.pg.ps += preferred; pgid.pg.pool = le32_to_cpu(fl->fl_pg_pool); if (preferred >= 0) dout("calc_object_layout '%s' pgid %d.%xp%d (%llx)\n", oid, -- cgit v1.2.3 From bb097ffaf833a40335b6dd5e4fa6f5ed0b223bdc Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 19 Oct 2009 16:17:31 -0700 Subject: ceph: v0.17 of client Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index f8f27e28a6bf..ae523828c538 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -19,8 +19,8 @@ * Ceph release version */ #define CEPH_VERSION_MAJOR 0 -#define CEPH_VERSION_MINOR 16 -#define CEPH_VERSION_PATCH 1 +#define CEPH_VERSION_MINOR 17 +#define CEPH_VERSION_PATCH 0 #define _CEPH_STRINGIFY(x) #x #define CEPH_STRINGIFY(x) _CEPH_STRINGIFY(x) -- cgit v1.2.3 From 232d4b01319767b3ffa5d08962a81c805962be49 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 21 Oct 2009 11:21:49 -0700 Subject: ceph: move directory size logic to ceph_getattr We can't fill i_size with rbytes at the fill_file_size stage without adding additional checks for directories. Notably, we want st_blocks to remain 0 on directories so that 'du' still works. Fill in i_blocks, i_size specially in ceph_getattr instead. Signed-off-by: Sage Weil --- fs/ceph/inode.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 6097af790047..036873c42a78 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -568,8 +568,6 @@ static int fill_inode(struct inode *inode, queue_trunc = ceph_fill_file_size(inode, issued, le32_to_cpu(info->truncate_seq), le64_to_cpu(info->truncate_size), - S_ISDIR(inode->i_mode) ? 
- ci->i_rbytes : le64_to_cpu(info->size)); ceph_fill_file_time(inode, issued, le32_to_cpu(info->time_warp_seq), @@ -1603,6 +1601,7 @@ int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; + struct ceph_inode_info *ci = ceph_inode(inode); int err; err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL); @@ -1613,8 +1612,11 @@ int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, stat->dev = ceph_snap(inode); else stat->dev = 0; - if (S_ISDIR(inode->i_mode)) + if (S_ISDIR(inode->i_mode)) { + stat->size = ci->i_rbytes; + stat->blocks = 0; stat->blksize = 65536; + } } return err; } -- cgit v1.2.3 From ecb19c4649d7396737eb0d91a475661fe9d7c028 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 22 Oct 2009 10:53:02 -0700 Subject: ceph: remove small mon addr limit; use CEPH_MAX_MON where appropriate Get rid of separate max mon limit; use the system limit instead. This allows mounts when there are lots of mon addrs provided by mount.ceph (as with a host with lots of A/AAAA records). Signed-off-by: Sage Weil --- fs/ceph/super.c | 4 ++-- fs/ceph/super.h | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/fs/ceph/super.c b/fs/ceph/super.c index ab950fce4172..81916250f0b6 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -314,7 +314,7 @@ static int parse_mount_args(struct ceph_client *client, int err; substring_t argstr[MAX_OPT_ARGS]; int num_mon; - struct ceph_entity_addr mon_addr[CEPH_MAX_MON_MOUNT_ADDR]; + struct ceph_entity_addr mon_addr[CEPH_MAX_MON]; int i; dout("parse_mount_args dev_name '%s'\n", dev_name); @@ -344,7 +344,7 @@ static int parse_mount_args(struct ceph_client *client, /* get mon ip(s) */ err = ceph_parse_ips(dev_name, *path, mon_addr, - CEPH_MAX_MON_MOUNT_ADDR, &num_mon); + CEPH_MAX_MON, &num_mon); if (err < 0) return err; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 75556e97e865..3af42d9097ec 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -47,8 +47,6 @@ (!!((client)->mount_args.flags & CEPH_OPT_##opt)) -#define CEPH_MAX_MON_MOUNT_ADDR 5 - struct ceph_mount_args { int sb_flags; int flags; -- cgit v1.2.3 From 7b813c46021e8f4909772a5bbfb5212bd140764c Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 26 Oct 2009 22:07:53 -0700 Subject: ceph: reduce parse_mount_args stack usage Since we've increased the max mon count, we shouldn't put the addr array on the parse_mount_args stack. Put it on the heap instead. 
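
A simplified user-space sketch of the pattern (sizes, types, and names below are illustrative only): allocate the large address array on the heap, funnel every failure through a single exit label, and free it on all paths.

    #include <errno.h>
    #include <stdlib.h>

    #define MAX_MON 32                          /* stand-in for CEPH_MAX_MON */

    struct addr { unsigned char bytes[28]; };   /* illustrative address blob */

    static int parse_addrs(const char *spec, int *num_out)
    {
        struct addr *mon_addr;
        int err;

        /* was (in spirit): struct addr mon_addr[MAX_MON]; -- hundreds of stack bytes */
        mon_addr = calloc(MAX_MON, sizeof(*mon_addr));
        if (!mon_addr)
            return -ENOMEM;

        err = -EINVAL;
        if (!spec)
            goto out;                /* every failure funnels through out: */

        *num_out = 1;                /* pretend one address was parsed */
        err = 0;
    out:
        free(mon_addr);              /* released on success and failure alike */
        return err;
    }

    int main(void)
    {
        int n = 0;

        return parse_addrs("1.2.3.4:/", &n) ? 1 : 0;
    }
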
Signed-off-by: Sage Weil --- fs/ceph/super.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 81916250f0b6..deb51bd6ed83 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -314,12 +314,16 @@ static int parse_mount_args(struct ceph_client *client, int err; substring_t argstr[MAX_OPT_ARGS]; int num_mon; - struct ceph_entity_addr mon_addr[CEPH_MAX_MON]; + struct ceph_entity_addr *mon_addr; int i; dout("parse_mount_args dev_name '%s'\n", dev_name); memset(args, 0, sizeof(*args)); + mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*mon_addr), GFP_KERNEL); + if (!mon_addr) + return -ENOMEM; + /* start with defaults */ args->sb_flags = flags; args->flags = CEPH_OPT_DEFAULT; @@ -333,27 +337,29 @@ static int parse_mount_args(struct ceph_client *client, args->max_readdir = 1024; /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */ + err = -EINVAL; if (!dev_name) - return -EINVAL; + goto out; *path = strstr(dev_name, ":/"); if (*path == NULL) { pr_err("device name is missing path (no :/ in %s)\n", dev_name); - return -EINVAL; + goto out; } /* get mon ip(s) */ err = ceph_parse_ips(dev_name, *path, mon_addr, CEPH_MAX_MON, &num_mon); if (err < 0) - return err; + goto out; /* build initial monmap */ + err = -ENOMEM; client->monc.monmap = kzalloc(sizeof(*client->monc.monmap) + num_mon*sizeof(client->monc.monmap->mon_inst[0]), GFP_KERNEL); if (!client->monc.monmap) - return -ENOMEM; + goto out; for (i = 0; i < num_mon; i++) { client->monc.monmap->mon_inst[i].addr = mon_addr[i]; client->monc.monmap->mon_inst[i].addr.erank = 0; @@ -374,11 +380,11 @@ static int parse_mount_args(struct ceph_client *client, int token, intval, ret; if (!*c) continue; + err = -EINVAL; token = match_token((char *)c, arg_tokens, argstr); if (token < 0) { pr_err("bad mount option at '%s'\n", c); - return -EINVAL; - + goto out; } if (token < Opt_ip) { ret = match_int(&argstr[0], &intval); @@ -468,8 +474,11 @@ static int parse_mount_args(struct ceph_client *client, BUG_ON(token); } } + err = 0; - return 0; +out: + kfree(mon_addr); + return err; } static void release_mount_args(struct ceph_mount_args *args) -- cgit v1.2.3 From 6ca874e92d5e50beb8e351dfd8121947bafc79ec Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 26 Oct 2009 22:06:22 -0700 Subject: ceph: silence uninitialized variable warning Signed-off-by: Sage Weil --- fs/ceph/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ceph/super.c b/fs/ceph/super.c index deb51bd6ed83..924e6cad0b66 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -808,7 +808,7 @@ static int ceph_get_sb(struct file_system_type *fs_type, struct ceph_client *client; int err; int (*compare_super)(struct super_block *, void *) = ceph_compare_super; - const char *path; + const char *path = 0; dout("ceph_get_sb\n"); -- cgit v1.2.3 From e53c2fe075feda1fd4f009956ac026dc24c3a199 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 27 Oct 2009 10:19:28 -0700 Subject: ceph: fix, clean up string mount arg parsing Clearly demark int and string argument options, and do not try to convert string arguments to ints. 
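
The cleanup relies on sentinel values in the token enum. A minimal standalone sketch of that pattern (option names invented): a token's position relative to the two sentinels decides whether its argument is parsed as an integer, kept as a string, or ignored entirely.

    #include <stdio.h>

    enum {
        Opt_wsize,
        Opt_rsize,
        Opt_last_int,      /* int args above */
        Opt_snapdirname,
        Opt_last_string,   /* string args above */
        Opt_noshare,       /* everything else is a bare flag */
    };

    static void handle_token(int token, const char *arg)
    {
        if (token < Opt_last_int)
            printf("int option %d = %s\n", token, arg);        /* parse as integer */
        else if (token > Opt_last_int && token < Opt_last_string)
            printf("string option %d = \"%s\"\n", token, arg); /* keep as string */
        else
            printf("flag option %d\n", token);                 /* no argument */
    }

    int main(void)
    {
        handle_token(Opt_rsize, "524288");
        handle_token(Opt_snapdirname, ".snap");
        handle_token(Opt_noshare, NULL);
        return 0;
    }
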
Signed-off-by: Sage Weil --- fs/ceph/super.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 924e6cad0b66..b094f5003ef8 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -264,9 +264,11 @@ enum { Opt_caps_wanted_delay_min, Opt_caps_wanted_delay_max, Opt_readdir_max_entries, + Opt_last_int, /* int args above */ Opt_snapdirname, Opt_secret, + Opt_last_string, /* string args above */ Opt_ip, Opt_noshare, @@ -386,14 +388,19 @@ static int parse_mount_args(struct ceph_client *client, pr_err("bad mount option at '%s'\n", c); goto out; } - if (token < Opt_ip) { + if (token < Opt_last_int) { ret = match_int(&argstr[0], &intval); if (ret < 0) { pr_err("bad mount option arg (not int) " "at '%s'\n", c); continue; } - dout("got token %d intval %d\n", token, intval); + dout("got int token %d val %d\n", token, intval); + } else if (token > Opt_last_int && token < Opt_last_string) { + dout("got string token %d val %s\n", token, + argstr[0].from); + } else { + dout("got token %d\n", token); } switch (token) { case Opt_fsidmajor: -- cgit v1.2.3 From 6b8051855d983db8480ff1ea1b02ef2b49203c22 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 27 Oct 2009 11:50:50 -0700 Subject: ceph: allocate and parse mount args before client instance This simplifies much of the error handling during mount. It also means that we have the mount args before client creation, and we can initialize based on those options. Signed-off-by: Sage Weil --- fs/ceph/addr.c | 4 +- fs/ceph/caps.c | 4 +- fs/ceph/dir.c | 7 ++-- fs/ceph/mds_client.c | 6 +-- fs/ceph/mon_client.c | 38 ++++++++++++++++++ fs/ceph/osd_client.c | 6 +-- fs/ceph/super.c | 106 +++++++++++++++++++++++---------------------------- fs/ceph/super.h | 8 ++-- 8 files changed, 105 insertions(+), 74 deletions(-) diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index c7d673ffe023..bf535815592d 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -600,8 +600,8 @@ static int ceph_writepages_start(struct address_space *mapping, pr_warning("writepage_start %p on forced umount\n", inode); return -EIO; /* we're in a forced umount, don't write! */ } - if (client->mount_args.wsize && client->mount_args.wsize < wsize) - wsize = client->mount_args.wsize; + if (client->mount_args->wsize && client->mount_args->wsize < wsize) + wsize = client->mount_args->wsize; if (wsize < PAGE_CACHE_SIZE) wsize = PAGE_CACHE_SIZE; max_pages_ever = wsize >> PAGE_CACHE_SHIFT; diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 7d166182e98d..8b863dbec70c 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -270,7 +270,7 @@ static void put_cap(struct ceph_cap *cap, * lots of free/alloc churn. 
*/ if (caps_avail_count >= caps_reserve_count + - ceph_client(cap->ci->vfs_inode.i_sb)->mount_args.max_readdir) { + ceph_client(cap->ci->vfs_inode.i_sb)->mount_args->max_readdir) { caps_total_count--; kmem_cache_free(ceph_cap_cachep, cap); } else { @@ -388,7 +388,7 @@ static void __insert_cap_node(struct ceph_inode_info *ci, static void __cap_set_timeouts(struct ceph_mds_client *mdsc, struct ceph_inode_info *ci) { - struct ceph_mount_args *ma = &mdsc->client->mount_args; + struct ceph_mount_args *ma = mdsc->client->mount_args; ci->i_hold_caps_min = round_jiffies(jiffies + ma->caps_wanted_delay_min * HZ); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 7bb8db524e58..4f7467961b09 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -225,7 +225,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir) int err; u32 ftype; struct ceph_mds_reply_info_parsed *rinfo; - const int max_entries = client->mount_args.max_readdir; + const int max_entries = client->mount_args->max_readdir; dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off); if (fi->at_end) @@ -479,7 +479,8 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, /* .snap dir? */ if (err == -ENOENT && ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */ - strcmp(dentry->d_name.name, client->mount_args.snapdir_name) == 0) { + strcmp(dentry->d_name.name, + client->mount_args->snapdir_name) == 0) { struct inode *inode = ceph_get_snapdir(parent); dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n", dentry, dentry->d_name.len, dentry->d_name.name, inode); @@ -550,7 +551,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, spin_lock(&dir->i_lock); dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags); if (strncmp(dentry->d_name.name, - client->mount_args.snapdir_name, + client->mount_args->snapdir_name, dentry->d_name.len) && (ci->i_ceph_flags & CEPH_I_COMPLETE) && (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) { diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 12d66c0572ac..210cb6623ea2 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -943,7 +943,7 @@ static int add_cap_releases(struct ceph_mds_client *mdsc, int err = -ENOMEM; if (extra < 0) - extra = mdsc->client->mount_args.cap_release_safety; + extra = mdsc->client->mount_args->cap_release_safety; spin_lock(&session->s_cap_lock); @@ -2601,7 +2601,7 @@ static void wait_requests(struct ceph_mds_client *mdsc) mutex_unlock(&mdsc->mutex); dout("wait_requests waiting for requests\n"); wait_for_completion_timeout(&mdsc->safe_umount_waiters, - client->mount_args.mount_timeout * HZ); + client->mount_args->mount_timeout * HZ); mutex_lock(&mdsc->mutex); /* tear down remaining requests */ @@ -2693,7 +2693,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) int i; int n; struct ceph_client *client = mdsc->client; - unsigned long started, timeout = client->mount_args.mount_timeout * HZ; + unsigned long started, timeout = client->mount_args->mount_timeout * HZ; dout("close_sessions\n"); diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index e6e954cac6b9..61263c99c6a8 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -527,6 +527,40 @@ static void delayed_work(struct work_struct *work) mutex_unlock(&monc->mutex); } +/* + * On startup, we build a temporary monmap populated with the IPs + * provided by mount(2). 
+ */ +static int build_initial_monmap(struct ceph_mon_client *monc) +{ + struct ceph_mount_args *args = monc->client->mount_args; + struct ceph_entity_addr *mon_addr = args->mon_addr; + int num_mon = args->num_mon; + int i; + + /* build initial monmap */ + monc->monmap = kzalloc(sizeof(*monc->monmap) + + num_mon*sizeof(monc->monmap->mon_inst[0]), + GFP_KERNEL); + if (!monc->monmap) + return -ENOMEM; + for (i = 0; i < num_mon; i++) { + monc->monmap->mon_inst[i].addr = mon_addr[i]; + monc->monmap->mon_inst[i].addr.erank = 0; + monc->monmap->mon_inst[i].addr.nonce = 0; + monc->monmap->mon_inst[i].name.type = + CEPH_ENTITY_TYPE_MON; + monc->monmap->mon_inst[i].name.num = cpu_to_le64(i); + } + monc->monmap->num_mon = num_mon; + + /* release addr memory */ + kfree(args->mon_addr); + args->mon_addr = NULL; + args->num_mon = 0; + return 0; +} + int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) { int err = 0; @@ -537,6 +571,10 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) monc->monmap = NULL; mutex_init(&monc->mutex); + err = build_initial_monmap(monc); + if (err) + goto out; + monc->con = NULL; /* msg pools */ diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 0a254054a82a..7dc0f6299a52 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -444,7 +444,7 @@ static void register_request(struct ceph_osd_client *osdc, osdc->num_requests++; req->r_timeout_stamp = - jiffies + osdc->client->mount_args.osd_timeout*HZ; + jiffies + osdc->client->mount_args->osd_timeout*HZ; if (osdc->num_requests == 1) { osdc->timeout_tid = req->r_tid; @@ -609,7 +609,7 @@ static int __send_request(struct ceph_osd_client *osdc, reqhead->flags |= cpu_to_le32(req->r_flags); /* e.g., RETRY */ reqhead->reassert_version = req->r_reassert_version; - req->r_timeout_stamp = jiffies+osdc->client->mount_args.osd_timeout*HZ; + req->r_timeout_stamp = jiffies+osdc->client->mount_args->osd_timeout*HZ; ceph_msg_get(req->r_request); /* send consumes a ref */ ceph_con_send(&req->r_osd->o_con, req->r_request); @@ -632,7 +632,7 @@ static void handle_timeout(struct work_struct *work) container_of(work, struct ceph_osd_client, timeout_work.work); struct ceph_osd_request *req; struct ceph_osd *osd; - unsigned long timeout = osdc->client->mount_args.osd_timeout * HZ; + unsigned long timeout = osdc->client->mount_args->osd_timeout * HZ; unsigned long next_timeout = timeout + jiffies; struct rb_node *p; diff --git a/fs/ceph/super.c b/fs/ceph/super.c index b094f5003ef8..9b7815dfc035 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -110,7 +110,7 @@ static int ceph_syncfs(struct super_block *sb, int wait) static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt) { struct ceph_client *client = ceph_sb_to_client(mnt->mnt_sb); - struct ceph_mount_args *args = &client->mount_args; + struct ceph_mount_args *args = client->mount_args; if (args->flags & CEPH_OPT_FSID) seq_printf(m, ",fsidmajor=%llu,fsidminor%llu", @@ -307,24 +307,24 @@ static match_table_t arg_tokens = { }; -static int parse_mount_args(struct ceph_client *client, - int flags, char *options, const char *dev_name, - const char **path) +static struct ceph_mount_args *parse_mount_args(int flags, char *options, + const char *dev_name, + const char **path) { - struct ceph_mount_args *args = &client->mount_args; + struct ceph_mount_args *args; const char *c; - int err; + int err = -ENOMEM; substring_t argstr[MAX_OPT_ARGS]; - int num_mon; - struct ceph_entity_addr *mon_addr; - int i; - dout("parse_mount_args 
dev_name '%s'\n", dev_name); - memset(args, 0, sizeof(*args)); + args = kzalloc(sizeof(*args), GFP_KERNEL); + if (!args) + return ERR_PTR(-ENOMEM); + args->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*args->mon_addr), + GFP_KERNEL); + if (!args->mon_addr) + goto out; - mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*mon_addr), GFP_KERNEL); - if (!mon_addr) - return -ENOMEM; + dout("parse_mount_args %p, dev_name '%s'\n", args, dev_name); /* start with defaults */ args->sb_flags = flags; @@ -350,29 +350,11 @@ static int parse_mount_args(struct ceph_client *client, } /* get mon ip(s) */ - err = ceph_parse_ips(dev_name, *path, mon_addr, - CEPH_MAX_MON, &num_mon); + err = ceph_parse_ips(dev_name, *path, args->mon_addr, + CEPH_MAX_MON, &args->num_mon); if (err < 0) goto out; - /* build initial monmap */ - err = -ENOMEM; - client->monc.monmap = kzalloc(sizeof(*client->monc.monmap) + - num_mon*sizeof(client->monc.monmap->mon_inst[0]), - GFP_KERNEL); - if (!client->monc.monmap) - goto out; - for (i = 0; i < num_mon; i++) { - client->monc.monmap->mon_inst[i].addr = mon_addr[i]; - client->monc.monmap->mon_inst[i].addr.erank = 0; - client->monc.monmap->mon_inst[i].addr.nonce = 0; - client->monc.monmap->mon_inst[i].name.type = - CEPH_ENTITY_TYPE_MON; - client->monc.monmap->mon_inst[i].name.num = cpu_to_le64(i); - } - client->monc.monmap->num_mon = num_mon; - memset(&args->my_addr.in_addr, 0, sizeof(args->my_addr.in_addr)); - /* path on server */ *path += 2; dout("server path '%s'\n", *path); @@ -415,7 +397,7 @@ static int parse_mount_args(struct ceph_client *client, &args->my_addr, 1, NULL); if (err < 0) - return err; + goto out; args->flags |= CEPH_OPT_MYIP; break; @@ -481,25 +463,28 @@ static int parse_mount_args(struct ceph_client *client, BUG_ON(token); } } - err = 0; + return args; out: - kfree(mon_addr); - return err; + kfree(args->mon_addr); + kfree(args); + return ERR_PTR(err); } -static void release_mount_args(struct ceph_mount_args *args) +static void destroy_mount_args(struct ceph_mount_args *args) { + dout("destroy_mount_args %p\n", args); kfree(args->snapdir_name); args->snapdir_name = NULL; kfree(args->secret); args->secret = NULL; + kfree(args); } /* * create a fresh client instance */ -static struct ceph_client *ceph_create_client(void) +static struct ceph_client *ceph_create_client(struct ceph_mount_args *args) { struct ceph_client *client; int err = -ENOMEM; @@ -515,6 +500,7 @@ static struct ceph_client *ceph_create_client(void) client->sb = NULL; client->mount_state = CEPH_MOUNT_MOUNTING; client->whoami = -1; + client->mount_args = args; client->msgr = NULL; @@ -577,7 +563,7 @@ static void ceph_destroy_client(struct ceph_client *client) if (client->wb_pagevec_pool) mempool_destroy(client->wb_pagevec_pool); - release_mount_args(&client->mount_args); + destroy_mount_args(client->mount_args); kfree(client); dout("destroy_client %p done\n", client); @@ -613,7 +599,7 @@ static struct dentry *open_root_dentry(struct ceph_client *client, req->r_ino1.ino = CEPH_INO_ROOT; req->r_ino1.snap = CEPH_NOSNAP; req->r_started = started; - req->r_timeout = client->mount_args.mount_timeout * HZ; + req->r_timeout = client->mount_args->mount_timeout * HZ; req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE); req->r_num_caps = 2; err = ceph_mdsc_do_request(mdsc, NULL, req); @@ -641,7 +627,7 @@ static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt, { struct ceph_entity_addr *myaddr = NULL; int err; - unsigned long timeout = client->mount_args.mount_timeout * HZ; + unsigned long timeout = 
client->mount_args->mount_timeout * HZ; unsigned long started = jiffies; /* note the start time */ struct dentry *root; @@ -651,7 +637,7 @@ static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt, /* initialize the messenger */ if (client->msgr == NULL) { if (ceph_test_opt(client, MYIP)) - myaddr = &client->mount_args.my_addr; + myaddr = &client->mount_args->my_addr; client->msgr = ceph_messenger_create(myaddr); if (IS_ERR(client->msgr)) { err = PTR_ERR(client->msgr); @@ -727,7 +713,7 @@ static int ceph_set_super(struct super_block *s, void *data) dout("set_super %p data %p\n", s, data); - s->s_flags = client->mount_args.sb_flags; + s->s_flags = client->mount_args->sb_flags; s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */ s->s_fs_info = client; @@ -756,7 +742,7 @@ fail: static int ceph_compare_super(struct super_block *sb, void *data) { struct ceph_client *new = data; - struct ceph_mount_args *args = &new->mount_args; + struct ceph_mount_args *args = new->mount_args; struct ceph_client *other = ceph_sb_to_client(sb); int i; @@ -778,7 +764,7 @@ static int ceph_compare_super(struct super_block *sb, void *data) } dout("mon ip matches existing sb %p\n", sb); } - if (args->sb_flags != other->mount_args.sb_flags) { + if (args->sb_flags != other->mount_args->sb_flags) { dout("flags differ\n"); return 0; } @@ -798,9 +784,9 @@ static int ceph_init_bdi(struct super_block *sb, struct ceph_client *client) sb->s_bdi = &client->backing_dev_info; /* set ra_pages based on rsize mount option? */ - if (client->mount_args.rsize >= PAGE_CACHE_SIZE) + if (client->mount_args->rsize >= PAGE_CACHE_SIZE) client->backing_dev_info.ra_pages = - (client->mount_args.rsize + PAGE_CACHE_SIZE - 1) + (client->mount_args->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_SHIFT; err = bdi_register_dev(&client->backing_dev_info, sb->s_dev); @@ -816,19 +802,23 @@ static int ceph_get_sb(struct file_system_type *fs_type, int err; int (*compare_super)(struct super_block *, void *) = ceph_compare_super; const char *path = 0; + struct ceph_mount_args *args; dout("ceph_get_sb\n"); + args = parse_mount_args(flags, data, dev_name, &path); + if (IS_ERR(args)) { + err = PTR_ERR(args); + goto out_final; + } /* create client (which we may/may not use) */ - client = ceph_create_client(); - if (IS_ERR(client)) - return PTR_ERR(client); - - err = parse_mount_args(client, flags, data, dev_name, &path); - if (err < 0) - goto out; + client = ceph_create_client(args); + if (IS_ERR(client)) { + err = PTR_ERR(client); + goto out_final; + } - if (client->mount_args.flags & CEPH_OPT_NOSHARE) + if (client->mount_args->flags & CEPH_OPT_NOSHARE) compare_super = NULL; sb = sget(fs_type, compare_super, ceph_set_super, client); if (IS_ERR(sb)) { @@ -846,7 +836,7 @@ static int ceph_get_sb(struct file_system_type *fs_type, /* set up mempools */ err = -ENOMEM; client->wb_pagevec_pool = mempool_create_kmalloc_pool(10, - client->mount_args.wsize >> PAGE_CACHE_SHIFT); + client->mount_args->wsize >> PAGE_CACHE_SHIFT); if (!client->wb_pagevec_pool) goto out_splat; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 3af42d9097ec..a3d4943581d0 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -42,13 +42,15 @@ #define CEPH_OPT_DEFAULT (CEPH_OPT_RBYTES) #define ceph_set_opt(client, opt) \ - (client)->mount_args.flags |= CEPH_OPT_##opt; + (client)->mount_args->flags |= CEPH_OPT_##opt; #define ceph_test_opt(client, opt) \ - (!!((client)->mount_args.flags & CEPH_OPT_##opt)) + (!!((client)->mount_args->flags & CEPH_OPT_##opt)) struct 
ceph_mount_args { int sb_flags; + int num_mon; + struct ceph_entity_addr *mon_addr; int flags; int mount_timeout; int caps_wanted_delay_min, caps_wanted_delay_max; @@ -115,7 +117,7 @@ struct ceph_client { struct dentry *debugfs_dir, *debugfs_dentry_lru, *debugfs_caps; struct mutex mount_mutex; /* serialize mount attempts */ - struct ceph_mount_args mount_args; + struct ceph_mount_args *mount_args; struct ceph_fsid fsid; struct super_block *sb; -- cgit v1.2.3 From fbbccec9c6218cbc9ff47c6d88bfc6b52079e3ea Mon Sep 17 00:00:00 2001 From: Noah Watkins Date: Wed, 28 Oct 2009 11:54:49 -0700 Subject: ceph: replace list_entry with container_of Usage of non-list.h list_entry function for container_of functionality replaced with direct use of container_of. Signed-off-by: Noah Watkins Signed-off-by: Sage Weil --- fs/ceph/super.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ceph/super.h b/fs/ceph/super.h index a3d4943581d0..05947b96c524 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -364,7 +364,7 @@ struct ceph_inode_info { static inline struct ceph_inode_info *ceph_inode(struct inode *inode) { - return list_entry(inode, struct ceph_inode_info, vfs_inode); + return container_of(inode, struct ceph_inode_info, vfs_inode); } static inline void ceph_i_clear(struct inode *inode, unsigned mask) -- cgit v1.2.3 From 35e054a66e07f508aa7cfabc7db1757379093689 Mon Sep 17 00:00:00 2001 From: Noah Watkins Date: Wed, 28 Oct 2009 14:04:48 -0700 Subject: ceph: remove redundant use of le32_to_cpu Use the stripe unit size already calculated and saved on the stack to avoid a redundant call to le32_to_cpu. Signed-off-by: Noah Watkins Signed-off-by: Sage Weil --- fs/ceph/osdmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 72d75a239ac2..60012e05bdfd 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -735,7 +735,7 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen, osize, su); - su_per_object = osize / le32_to_cpu(layout->fl_stripe_unit); + su_per_object = osize / su; dout("osize %u / su %u = su_per_object %u\n", osize, su, su_per_object); -- cgit v1.2.3 From 5600f5ebd318f7af6f4b19a29f08d18bb85264e5 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 28 Oct 2009 14:57:25 -0700 Subject: ceph: correct comment to match striping calculation The object extent offset is the file offset _modulo_ the stripe unit. The code was correct, the comment was wrong. Reported-by: Noah Watkins Signed-off-by: Sage Weil --- fs/ceph/osdmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 60012e05bdfd..a9a4143234fa 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -752,7 +752,7 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, *bno = objsetno * sc + stripepos; dout("objset %u * sc %u = bno %u\n", objsetno, sc, (unsigned)*bno); - /* *oxoff = *off / layout->fl_stripe_unit; */ + /* *oxoff = *off % layout->fl_stripe_unit; */ t = off; *oxoff = do_div(t, su); *oxlen = min_t(u64, *plen, su - *oxoff); -- cgit v1.2.3 From 645a102581b3639836b17d147c35d574fd6e8267 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 28 Oct 2009 15:15:05 -0700 Subject: ceph: fix object striping calculation for non-default striping schemes We were incorrectly calculating the object offset. If we have multiple stripe units per object, we need to shift to the start of the current su in addition to the offset within the su.
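Illustrative aside, not part of the original patch: a small standalone C sketch of the corrected file-to-object mapping described above. Variable names (su = stripe unit, sc = stripe count, ono/oxoff = object number and offset within the object) follow the kernel code; the sketch assumes the object size is a whole multiple of the stripe unit.

#include <stdint.h>
#include <stdio.h>

/* userspace sketch of the mapping; not the kernel implementation */
static void map_file_offset(uint64_t off, uint32_t su, uint32_t sc,
                            uint32_t object_size,
                            uint64_t *ono, uint64_t *oxoff)
{
        uint64_t bl = off / su;                   /* stripe unit index in the file */
        uint64_t stripeno = bl / sc;              /* which stripe */
        uint64_t stripepos = bl % sc;             /* position within the stripe */
        uint32_t su_per_object = object_size / su;
        uint64_t objsetno = stripeno / su_per_object;

        *ono = objsetno * sc + stripepos;         /* object number */
        /* start of this su within the object, plus the offset within the su */
        *oxoff = (stripeno % su_per_object) * su + off % su;
}

int main(void)
{
        uint64_t ono, oxoff;

        /* e.g. 64 KB stripe unit, 2 objects per stripe, 4 MB objects */
        map_file_offset(5 * 65536ULL + 100, 65536, 2, 4 * 1024 * 1024,
                        &ono, &oxoff);
        printf("ono=%llu oxoff=%llu\n",
               (unsigned long long)ono, (unsigned long long)oxoff);
        return 0;
}

With only one stripe unit per object the added (stripeno % su_per_object) * su term is always zero, which is why the default layout did not expose the bug.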
Also rename bno to ono (object number) to avoid some variable naming confusion. Signed-off-by: Sage Weil --- fs/ceph/osdmap.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index a9a4143234fa..5a5520c5a2b3 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -723,7 +723,7 @@ bad: */ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, u64 off, u64 *plen, - u64 *bno, + u64 *ono, u64 *oxoff, u64 *oxlen) { u32 osize = le32_to_cpu(layout->fl_object_size); @@ -750,11 +750,14 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, stripepos = bl % sc; objsetno = stripeno / su_per_object; - *bno = objsetno * sc + stripepos; - dout("objset %u * sc %u = bno %u\n", objsetno, sc, (unsigned)*bno); - /* *oxoff = *off % layout->fl_stripe_unit; */ + *ono = objsetno * sc + stripepos; + dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono); + + /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */ t = off; *oxoff = do_div(t, su); + *oxoff += (stripeno % su_per_object) * su; + *oxlen = min_t(u64, *plen, su - *oxoff); *plen = *oxlen; -- cgit v1.2.3 From ff1d1f7179363209b7f1493ea39b666f50d05cf4 Mon Sep 17 00:00:00 2001 From: Noah Watkins Date: Fri, 30 Oct 2009 12:57:30 -0700 Subject: ceph: fix intra strip unit length calculation Commit 645a102581b3639836b17d147c35d574fd6e8267 fixes calculation of object offset for layouts with multiple stripes per object. This updates the calculation of the length written to take into account multiple stripes per object. Signed-off-by: Noah Watkins Signed-off-by: Sage Weil --- fs/ceph/osdmap.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 5a5520c5a2b3..d62e111b8a34 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -731,7 +731,7 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, u32 sc = le32_to_cpu(layout->fl_stripe_count); u32 bl, stripeno, stripepos, objsetno; u32 su_per_object; - u64 t; + u64 t, su_offset; dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen, osize, su); @@ -755,10 +755,15 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */ t = off; - *oxoff = do_div(t, su); - *oxoff += (stripeno % su_per_object) * su; - - *oxlen = min_t(u64, *plen, su - *oxoff); + su_offset = do_div(t, su); + *oxoff = su_offset + (stripeno % su_per_object) * su; + + /* + * Calculate the length of the extent being written to the selected + * object. This is the minimum of the full length requested (plen) or + * the remainder of the current stripe being written to. 
+ */ + *oxlen = min_t(u64, *plen, su - su_offset); *plen = *oxlen; dout(" obj extent %llu~%llu\n", *oxoff, *oxlen); -- cgit v1.2.3 From 63ff78b25c4b204075b5b98afcac6ad3639d43fe Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Sun, 1 Nov 2009 17:51:15 -0800 Subject: ceph: fix uninitialized err variable Fixes warning fs/ceph/xattr.c: In function '__build_xattrs': fs/ceph/xattr.c:353: warning: 'err' may be used uninitialized in this function Signed-off-by: Sage Weil --- fs/ceph/xattr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 65b3a84bbb2e..1a48a55c5109 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -350,7 +350,7 @@ static int __build_xattrs(struct inode *inode) struct ceph_inode_info *ci = ceph_inode(inode); int xattr_version; struct ceph_inode_xattr **xattrs = NULL; - int err; + int err = 0; int i; dout("__build_xattrs() len=%d\n", -- cgit v1.2.3 From 33aa96e7430d215e2ee779f65cdad0f6d4571fe1 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Sun, 1 Nov 2009 17:53:24 -0800 Subject: crush: always return a value from crush_bucket_choose Even when we encounter a corrupt bucket. We still BUG(). This fixes the warning fs/ceph/crush/mapper.c: In function 'crush_choose': fs/ceph/crush/mapper.c:352: warning: control may reach end of non-void function 'crush_bucket_choose' being inlined Signed-off-by: Sage Weil --- fs/ceph/crush/mapper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ceph/crush/mapper.c b/fs/ceph/crush/mapper.c index c268393adfcb..54f3f402af60 100644 --- a/fs/ceph/crush/mapper.c +++ b/fs/ceph/crush/mapper.c @@ -253,7 +253,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r) x, r); default: BUG_ON(1); -/* return in->items[0] */; + return in->items[0]; } } -- cgit v1.2.3 From 859e7b149362475672e2a996f29b8f45cbb34d82 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 2 Nov 2009 09:32:47 -0800 Subject: ceph: init/destroy bdi in client create/destroy helpers This keeps bdi setup/teardown in line with client life cycle. Signed-off-by: Sage Weil --- fs/ceph/super.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 9b7815dfc035..0ae40bad53c4 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -508,10 +508,14 @@ static struct ceph_client *ceph_create_client(struct ceph_mount_args *args) client->signed_ticket = NULL; client->signed_ticket_len = 0; + err = bdi_init(&client->backing_dev_info); + if (err < 0) + goto fail; + err = -ENOMEM; client->wb_wq = create_workqueue("ceph-writeback"); if (client->wb_wq == NULL) - goto fail; + goto fail_bdi; client->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid"); if (client->pg_inv_wq == NULL) goto fail_wb_wq; @@ -537,6 +541,8 @@ fail_pg_inv_wq: destroy_workqueue(client->pg_inv_wq); fail_wb_wq: destroy_workqueue(client->wb_wq); +fail_bdi: + bdi_destroy(&client->backing_dev_info); fail: kfree(client); return ERR_PTR(err); @@ -774,13 +780,10 @@ static int ceph_compare_super(struct super_block *sb, void *data) /* * construct our own bdi so we can control readahead, etc. */ -static int ceph_init_bdi(struct super_block *sb, struct ceph_client *client) +static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client) { int err; - err = bdi_init(&client->backing_dev_info); - if (err < 0) - return err; sb->s_bdi = &client->backing_dev_info; /* set ra_pages based on rsize mount option? 
*/ @@ -840,7 +843,7 @@ static int ceph_get_sb(struct file_system_type *fs_type, if (!client->wb_pagevec_pool) goto out_splat; - err = ceph_init_bdi(sb, client); + err = ceph_register_bdi(sb, client); if (err < 0) goto out_splat; } -- cgit v1.2.3 From 63f2d211954b790fea0a9caeae605c7956535af6 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 3 Nov 2009 15:17:56 -0800 Subject: ceph: use fixed endian encoding for ceph_entity_addr We exchange struct ceph_entity_addr over the wire and store it on disk. The sockaddr_storage.ss_family field, however, is host endianness. So, fix ss_family endianness to big endian when sending/receiving over the wire. Signed-off-by: Sage Weil --- fs/ceph/decode.h | 16 ++++++++++++++-- fs/ceph/mdsmap.c | 1 + fs/ceph/messenger.c | 23 ++++++++++++++++++----- fs/ceph/messenger.h | 1 + fs/ceph/mon_client.c | 2 ++ fs/ceph/msgr.h | 2 +- fs/ceph/osdmap.c | 3 +++ 7 files changed, 40 insertions(+), 8 deletions(-) diff --git a/fs/ceph/decode.h b/fs/ceph/decode.h index 91179fb2cc3f..a382aecc55bb 100644 --- a/fs/ceph/decode.h +++ b/fs/ceph/decode.h @@ -76,18 +76,30 @@ static inline void ceph_decode_copy(void **p, void *pv, size_t n) * struct ceph_timespec <-> struct timespec */ static inline void ceph_decode_timespec(struct timespec *ts, - struct ceph_timespec *tv) + const struct ceph_timespec *tv) { ts->tv_sec = le32_to_cpu(tv->tv_sec); ts->tv_nsec = le32_to_cpu(tv->tv_nsec); } static inline void ceph_encode_timespec(struct ceph_timespec *tv, - struct timespec *ts) + const struct timespec *ts) { tv->tv_sec = cpu_to_le32(ts->tv_sec); tv->tv_nsec = cpu_to_le32(ts->tv_nsec); } +/* + * sockaddr_storage <-> ceph_sockaddr + */ +static inline void ceph_encode_addr(struct ceph_entity_addr *a) +{ + a->in_addr.ss_family = htons(a->in_addr.ss_family); +} +static inline void ceph_decode_addr(struct ceph_entity_addr *a) +{ + a->in_addr.ss_family = ntohs(a->in_addr.ss_family); +} + /* * encoders */ diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c index 80daea064470..4226c810ce22 100644 --- a/fs/ceph/mdsmap.c +++ b/fs/ceph/mdsmap.c @@ -86,6 +86,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) ceph_decode_need(p, end, sizeof(addr) + 1 + sizeof(u32), bad); ceph_decode_copy(p, &addr, sizeof(addr)); + ceph_decode_addr(&addr); infoversion = ceph_decode_8(p); namelen = ceph_decode_32(p); /* skip mds name */ *p += namelen; diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index b48abc0b3be7..6ff44bbddf67 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -12,6 +12,7 @@ #include "super.h" #include "messenger.h" +#include "decode.h" /* * Ceph uses the messenger to exchange ceph_msg messages with other @@ -97,6 +98,12 @@ const char *pr_addr(const struct sockaddr_storage *ss) return s; } +static void encode_my_addr(struct ceph_messenger *msgr) +{ + memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr)); + ceph_encode_addr(&msgr->my_enc_addr); +} + /* * work queue for all reading and writing to/from the socket. 
*/ @@ -590,12 +597,12 @@ static void prepare_write_connect(struct ceph_messenger *msgr, con->out_kvec[0].iov_base = CEPH_BANNER; con->out_kvec[0].iov_len = len; - con->out_kvec[1].iov_base = &msgr->inst.addr; - con->out_kvec[1].iov_len = sizeof(msgr->inst.addr); + con->out_kvec[1].iov_base = &msgr->my_enc_addr; + con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr); con->out_kvec[2].iov_base = &con->out_connect; con->out_kvec[2].iov_len = sizeof(con->out_connect); con->out_kvec_left = 3; - con->out_kvec_bytes = len + sizeof(msgr->inst.addr) + + con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr) + sizeof(con->out_connect); con->out_kvec_cur = con->out_kvec; con->out_more = 0; @@ -976,6 +983,9 @@ static int process_connect(struct ceph_connection *con) if (verify_hello(con) < 0) return -1; + ceph_decode_addr(&con->actual_peer_addr); + ceph_decode_addr(&con->peer_addr_for_me); + /* * Make sure the other end is who we wanted. note that the other * end may not yet know their ip address, so if it's 0.0.0.0, give @@ -1005,6 +1015,7 @@ static int process_connect(struct ceph_connection *con) &con->peer_addr_for_me.in_addr, sizeof(con->peer_addr_for_me.in_addr)); addr_set_port(&con->msgr->inst.addr.in_addr, port); + encode_my_addr(con->msgr); dout("process_connect learned my addr is %s\n", pr_addr(&con->msgr->inst.addr.in_addr)); } @@ -1780,6 +1791,7 @@ struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr) /* select a random nonce */ get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); + encode_my_addr(msgr); dout("messenger_create %p\n", msgr); return msgr; @@ -1806,8 +1818,9 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) } /* set src+dst */ - msg->hdr.src = con->msgr->inst; - msg->hdr.orig_src = con->msgr->inst; + msg->hdr.src.name = con->msgr->inst.name; + msg->hdr.src.addr = con->msgr->my_enc_addr; + msg->hdr.orig_src = msg->hdr.src; msg->hdr.dst_erank = con->peer_addr.erank; /* queue */ diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index dcd98b64dca9..e016fa7cf970 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -53,6 +53,7 @@ extern const char *ceph_name_type_str(int t); struct ceph_messenger { struct ceph_entity_inst inst; /* my name+address */ + struct ceph_entity_addr my_enc_addr; struct page *zero_page; /* used in certain error cases */ bool nocrc; diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index 61263c99c6a8..95b76e761e18 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -59,6 +59,8 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end) m->epoch = epoch; m->num_mon = num_mon; ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0])); + for (i = 0; i < num_mon; i++) + ceph_decode_addr(&m->mon_inst[i].addr); dout("monmap_decode epoch %d, num_mon %d\n", m->epoch, m->num_mon); diff --git a/fs/ceph/msgr.h b/fs/ceph/msgr.h index 9abc879e25b1..8e3ea2eb1bf5 100644 --- a/fs/ceph/msgr.h +++ b/fs/ceph/msgr.h @@ -21,7 +21,7 @@ * whenever the wire protocol changes. try to keep this string length * constant. 
*/ -#define CEPH_BANNER "ceph v022" +#define CEPH_BANNER "ceph v023" #define CEPH_BANNER_MAX_LEN 30 diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index d62e111b8a34..cd7bb265d789 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -460,6 +460,8 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) *p += 4; /* skip length field (should match max) */ ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr)); + for (i = 0; i < map->max_osd; i++) + ceph_decode_addr(&map->osd_addr[i]); /* pg_temp */ ceph_decode_32_safe(p, end, len, bad); @@ -619,6 +621,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, struct ceph_entity_addr addr; ceph_decode_32_safe(p, end, osd, bad); ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad); + ceph_decode_addr(&addr); pr_info("osd%d up\n", osd); BUG_ON(osd >= map->max_osd); map->osd_state[osd] |= CEPH_OSD_UP; -- cgit v1.2.3 From 51042122d4f85e0f8ee577a4230f172fcc57c456 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 4 Nov 2009 11:39:12 -0800 Subject: ceph: fix endian conversions for ceph_pg The endian conversions don't quite work with the old union ceph_pg. Just make it a regular struct, and make each field __le. This is simpler and it has the added bonus of actually working. Signed-off-by: Sage Weil --- fs/ceph/ioctl.c | 4 +-- fs/ceph/osd_client.c | 8 ++--- fs/ceph/osdmap.c | 100 +++++++++++++++++++++++++++++++-------------------- fs/ceph/osdmap.h | 5 +-- fs/ceph/rados.h | 13 +++---- 5 files changed, 75 insertions(+), 55 deletions(-) diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c index e4f99eff5d93..4c33e19fc241 100644 --- a/fs/ceph/ioctl.c +++ b/fs/ceph/ioctl.c @@ -99,7 +99,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg) u64 len = 1, olen; u64 tmp; struct ceph_object_layout ol; - union ceph_pg pgid; + struct ceph_pg pgid; /* copy and validate */ if (copy_from_user(&dl, arg, sizeof(dl))) @@ -121,7 +121,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg) ceph_calc_object_layout(&ol, dl.object_name, &ci->i_layout, osdc->osdmap); - pgid.pg64 = le64_to_cpu(ol.ol_pgid); + pgid = ol.ol_pgid; dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid); if (dl.osd >= 0) { struct ceph_entity_addr *a = diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 7dc0f6299a52..7db14ba6261c 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -520,7 +520,7 @@ static int __map_osds(struct ceph_osd_client *osdc, struct ceph_osd_request *req) { struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base; - union ceph_pg pgid; + struct ceph_pg pgid; int o = -1; int err; struct ceph_osd *newosd = NULL; @@ -530,7 +530,7 @@ static int __map_osds(struct ceph_osd_client *osdc, &req->r_file_layout, osdc->osdmap); if (err) return err; - pgid.pg64 = le64_to_cpu(reqhead->layout.ol_pgid); + pgid = reqhead->layout.ol_pgid; o = ceph_calc_pg_primary(osdc->osdmap, pgid); if ((req->r_osd && req->r_osd->o_osd == o && @@ -538,8 +538,8 @@ static int __map_osds(struct ceph_osd_client *osdc, (req->r_osd == NULL && o == -1)) return 0; /* no change */ - dout("map_osds tid %llu pgid %llx pool %d osd%d (was osd%d)\n", - req->r_tid, pgid.pg64, pgid.pg.pool, o, + dout("map_osds tid %llu pgid %d.%x osd%d (was osd%d)\n", + req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o, req->r_osd ? 
req->r_osd->o_osd : -1); if (req->r_osd) { diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index cd7bb265d789..8b0cd1107507 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -366,19 +366,33 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) /* * Insert a new pg_temp mapping */ +static int pgid_cmp(struct ceph_pg l, struct ceph_pg r) +{ + u64 a = *(u64 *)&l; + u64 b = *(u64 *)&r; + + if (a < b) + return -1; + if (a > b) + return 1; + return 0; +} + static int __insert_pg_mapping(struct ceph_pg_mapping *new, struct rb_root *root) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct ceph_pg_mapping *pg = NULL; + int c; while (*p) { parent = *p; pg = rb_entry(parent, struct ceph_pg_mapping, node); - if (new->pgid < pg->pgid) + c = pgid_cmp(new->pgid, pg->pgid); + if (c < 0) p = &(*p)->rb_left; - else if (new->pgid > pg->pgid) + else if (c > 0) p = &(*p)->rb_right; else return -EEXIST; @@ -467,11 +481,11 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) ceph_decode_32_safe(p, end, len, bad); for (i = 0; i < len; i++) { int n, j; - u64 pgid; + struct ceph_pg pgid; struct ceph_pg_mapping *pg; ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad); - pgid = ceph_decode_64(p); + ceph_decode_copy(p, &pgid, sizeof(pgid)); n = ceph_decode_32(p); ceph_decode_need(p, end, n * sizeof(u32), bad); pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS); @@ -487,7 +501,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) err = __insert_pg_mapping(pg, &map->pg_temp); if (err) goto bad; - dout(" added pg_temp %llx len %d\n", pgid, len); + dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len); } /* crush */ @@ -659,19 +673,20 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, while (len--) { struct ceph_pg_mapping *pg; int j; - u64 pgid; + struct ceph_pg pgid; u32 pglen; ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad); - pgid = ceph_decode_64(p); + ceph_decode_copy(p, &pgid, sizeof(pgid)); pglen = ceph_decode_32(p); /* remove any? 
*/ - while (rbp && rb_entry(rbp, struct ceph_pg_mapping, - node)->pgid <= pgid) { + while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping, + node)->pgid, pgid) <= 0) { struct rb_node *cur = rbp; rbp = rb_next(rbp); dout(" removed pg_temp %llx\n", - rb_entry(cur, struct ceph_pg_mapping, node)->pgid); + *(u64 *)&rb_entry(cur, struct ceph_pg_mapping, + node)->pgid); rb_erase(cur, &map->pg_temp); } @@ -690,14 +705,16 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, err = __insert_pg_mapping(pg, &map->pg_temp); if (err) goto bad; - dout(" added pg_temp %llx len %d\n", pgid, pglen); + dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, + pglen); } } while (rbp) { struct rb_node *cur = rbp; rbp = rb_next(rbp); dout(" removed pg_temp %llx\n", - rb_entry(cur, struct ceph_pg_mapping, node)->pgid); + *(u64 *)&rb_entry(cur, struct ceph_pg_mapping, + node)->pgid); rb_erase(cur, &map->pg_temp); } @@ -782,16 +799,19 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol, struct ceph_osdmap *osdmap) { unsigned num, num_mask; - union ceph_pg pgid; + struct ceph_pg pgid; s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred); int poolid = le32_to_cpu(fl->fl_pg_pool); struct ceph_pg_pool_info *pool; + unsigned ps; if (poolid >= osdmap->num_pools) return -EIO; - pool = &osdmap->pg_pool[poolid]; + pool = &osdmap->pg_pool[poolid]; + ps = ceph_full_name_hash(oid, strlen(oid)); if (preferred >= 0) { + ps += preferred; num = le32_to_cpu(pool->v.lpg_num); num_mask = pool->lpg_num_mask; } else { @@ -799,22 +819,17 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol, num_mask = pool->pg_num_mask; } - pgid.pg64 = 0; /* start with it zeroed out */ - pgid.pg.ps = ceph_full_name_hash(oid, strlen(oid)); - pgid.pg.preferred = preferred; - if (preferred >= 0) - pgid.pg.ps += preferred; - pgid.pg.pool = le32_to_cpu(fl->fl_pg_pool); + pgid.ps = cpu_to_le16(ps); + pgid.preferred = cpu_to_le16(preferred); + pgid.pool = fl->fl_pg_pool; if (preferred >= 0) - dout("calc_object_layout '%s' pgid %d.%xp%d (%llx)\n", oid, - pgid.pg.pool, pgid.pg.ps, (int)preferred, pgid.pg64); + dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps, + (int)preferred); else - dout("calc_object_layout '%s' pgid %d.%x (%llx)\n", oid, - pgid.pg.pool, pgid.pg.ps, pgid.pg64); + dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps); - ol->ol_pgid = cpu_to_le64(pgid.pg64); + ol->ol_pgid = pgid; ol->ol_stripe_unit = fl->fl_object_stripe_unit; - return 0; } @@ -822,21 +837,24 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol, * Calculate raw osd vector for the given pgid. Return pointer to osd * array, or NULL on failure. */ -static int *calc_pg_raw(struct ceph_osdmap *osdmap, union ceph_pg pgid, +static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, int *osds, int *num) { struct rb_node *n = osdmap->pg_temp.rb_node; struct ceph_pg_mapping *pg; struct ceph_pg_pool_info *pool; int ruleno; - unsigned pps; /* placement ps */ + unsigned poolid, ps, pps; + int preferred; + int c; /* pg_temp? 
*/ while (n) { pg = rb_entry(n, struct ceph_pg_mapping, node); - if (pgid.pg64 < pg->pgid) + c = pgid_cmp(pgid, pg->pgid); + if (c < 0) n = n->rb_left; - else if (pgid.pg64 > pg->pgid) + else if (c > 0) n = n->rb_right; else { *num = pg->len; @@ -845,36 +863,40 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, union ceph_pg pgid, } /* crush */ - if (pgid.pg.pool >= osdmap->num_pools) + poolid = le32_to_cpu(pgid.pool); + ps = le16_to_cpu(pgid.ps); + preferred = (s16)le16_to_cpu(pgid.preferred); + + if (poolid >= osdmap->num_pools) return NULL; - pool = &osdmap->pg_pool[pgid.pg.pool]; + pool = &osdmap->pg_pool[poolid]; ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset, pool->v.type, pool->v.size); if (ruleno < 0) { pr_err("no crush rule pool %d type %d size %d\n", - pgid.pg.pool, pool->v.type, pool->v.size); + poolid, pool->v.type, pool->v.size); return NULL; } - if (pgid.pg.preferred >= 0) - pps = ceph_stable_mod(pgid.pg.ps, + if (preferred >= 0) + pps = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpgp_num), pool->lpgp_num_mask); else - pps = ceph_stable_mod(pgid.pg.ps, + pps = ceph_stable_mod(ps, le32_to_cpu(pool->v.pgp_num), pool->pgp_num_mask); - pps += pgid.pg.pool; + pps += poolid; *num = crush_do_rule(osdmap->crush, ruleno, pps, osds, min_t(int, pool->v.size, *num), - pgid.pg.preferred, osdmap->osd_weight); + preferred, osdmap->osd_weight); return osds; } /* * Return primary osd for given pgid, or -1 if none. */ -int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, union ceph_pg pgid) +int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid) { int rawosds[10], *osds; int i, num = ARRAY_SIZE(rawosds); diff --git a/fs/ceph/osdmap.h b/fs/ceph/osdmap.h index 07127c6fb134..c4af8418aa00 100644 --- a/fs/ceph/osdmap.h +++ b/fs/ceph/osdmap.h @@ -25,7 +25,7 @@ struct ceph_pg_pool_info { struct ceph_pg_mapping { struct rb_node node; - u64 pgid; + struct ceph_pg pgid; int len; int osds[]; }; @@ -118,6 +118,7 @@ extern int ceph_calc_object_layout(struct ceph_object_layout *ol, const char *oid, struct ceph_file_layout *fl, struct ceph_osdmap *osdmap); -extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, union ceph_pg pgid); +extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, + struct ceph_pg pgid); #endif diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h index a48cf4ae391e..85bdef78d142 100644 --- a/fs/ceph/rados.h +++ b/fs/ceph/rados.h @@ -55,13 +55,10 @@ struct ceph_timespec { * placement group. * we encode this into one __le64. */ -union ceph_pg { - __u64 pg64; - struct { - __s16 preferred; /* preferred primary osd */ - __u16 ps; /* placement seed */ - __u32 pool; /* object pool */ - } __attribute__ ((packed)) pg; +struct ceph_pg { + __le16 preferred; /* preferred primary osd */ + __le16 ps; /* placement seed */ + __le32 pool; /* object pool */ } __attribute__ ((packed)); /* @@ -117,7 +114,7 @@ static inline int ceph_stable_mod(int x, int b, int bmask) * object layout - how a given object should be stored. */ struct ceph_object_layout { - __le64 ol_pgid; /* raw pg, with _full_ ps precision. */ + struct ceph_pg ol_pgid; /* raw pg, with _full_ ps precision. */ __le32 ol_stripe_unit; /* for per-object parity, if any */ } __attribute__ ((packed)); -- cgit v1.2.3 From 6a18be16f7513ea8a4923c161ce073987932cbdb Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 4 Nov 2009 11:40:05 -0800 Subject: ceph: fix sparse endian warning Use the __le macro, even though for -1 it doesn't matter. 
Signed-off-by: Sage Weil --- fs/ceph/file.c | 2 +- fs/ceph/super.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 1bd57c8953bf..fc8aff4767d3 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -52,7 +52,7 @@ prepare_open_request(struct super_block *sb, int flags, int create_mode) req->r_fmode = ceph_flags_to_mode(flags); req->r_args.open.flags = cpu_to_le32(flags); req->r_args.open.mode = cpu_to_le32(create_mode); - req->r_args.open.preferred = -1; + req->r_args.open.preferred = cpu_to_le32(-1); out: return req; } diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 0ae40bad53c4..1ac7b07214f3 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -804,7 +804,7 @@ static int ceph_get_sb(struct file_system_type *fs_type, struct ceph_client *client; int err; int (*compare_super)(struct super_block *, void *) = ceph_compare_super; - const char *path = 0; + const char *path = NULL; struct ceph_mount_args *args; dout("ceph_get_sb\n"); -- cgit v1.2.3 From f28bcfbe660a3246621a367020054d4f1a179cd9 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 4 Nov 2009 11:46:35 -0800 Subject: ceph: convert port endianness The port is informational only, but we should make it correct. Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 6ff44bbddf67..5cc374850d8f 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -891,9 +891,9 @@ static int addr_port(struct sockaddr_storage *ss) { switch (ss->ss_family) { case AF_INET: - return ((struct sockaddr_in *)ss)->sin_port; + return ntohs(((struct sockaddr_in *)ss)->sin_port); case AF_INET6: - return ((struct sockaddr_in6 *)ss)->sin6_port; + return ntohs(((struct sockaddr_in6 *)ss)->sin6_port); } return 0; } -- cgit v1.2.3 From 1bdb70e59026838a79f77c440f8fe480a66e65e8 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 6 Nov 2009 13:57:49 -0800 Subject: ceph: clean up 'osd%d down' console msg No ceph prefix. Signed-off-by: Sage Weil --- fs/ceph/osdmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 8b0cd1107507..a025555b70af 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -648,7 +648,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, u32 osd; ceph_decode_32_safe(p, end, osd, bad); (*p)++; /* clean flag */ - pr_info("ceph osd%d down\n", osd); + pr_info("osd%d down\n", osd); if (osd < map->max_osd) map->osd_state[osd] &= ~CEPH_OSD_UP; } -- cgit v1.2.3 From c6cf726316abd613cfb7c325d950f3629f964ec6 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 6 Nov 2009 16:39:26 -0800 Subject: ceph: make CRUSH hash functions non-inline These are way too big to be inline. I missed crush/* when doing the inline audit for akpm's review.
Signed-off-by: Sage Weil --- fs/ceph/Makefile | 2 +- fs/ceph/README | 1 + fs/ceph/crush/crush.c | 11 ++++++ fs/ceph/crush/crush.h | 11 +----- fs/ceph/crush/hash.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/crush/hash.h | 92 ++++----------------------------------------------- 6 files changed, 107 insertions(+), 96 deletions(-) create mode 100644 fs/ceph/crush/hash.c diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile index 7da6d69dba29..8bad70aac363 100644 --- a/fs/ceph/Makefile +++ b/fs/ceph/Makefile @@ -11,7 +11,7 @@ ceph-objs := super.o inode.o dir.o file.o addr.o ioctl.o \ messenger.o msgpool.o buffer.o \ mds_client.o mdsmap.o \ mon_client.o \ - osd_client.o osdmap.o crush/crush.o crush/mapper.o \ + osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \ debugfs.o \ ceph_fs.o ceph_strings.o ceph_frag.o diff --git a/fs/ceph/README b/fs/ceph/README index 231a1df5eafe..660e00074d59 100644 --- a/fs/ceph/README +++ b/fs/ceph/README @@ -15,3 +15,4 @@ src/crush/crush.h fs/ceph/crush/crush.h src/crush/mapper.c fs/ceph/crush/mapper.c src/crush/mapper.h fs/ceph/crush/mapper.h src/crush/hash.h fs/ceph/crush/hash.h +src/crush/hash.c fs/ceph/crush/hash.c diff --git a/fs/ceph/crush/crush.c b/fs/ceph/crush/crush.c index 13755cdc4fb3..fabd302e5779 100644 --- a/fs/ceph/crush/crush.c +++ b/fs/ceph/crush/crush.c @@ -10,6 +10,17 @@ #include "crush.h" +const char *crush_bucket_alg_name(int alg) +{ + switch (alg) { + case CRUSH_BUCKET_UNIFORM: return "uniform"; + case CRUSH_BUCKET_LIST: return "list"; + case CRUSH_BUCKET_TREE: return "tree"; + case CRUSH_BUCKET_STRAW: return "straw"; + default: return "unknown"; + } +} + /** * crush_get_bucket_item_weight - Get weight of an item in given bucket * @b: bucket pointer diff --git a/fs/ceph/crush/crush.h b/fs/ceph/crush/crush.h index 9ac7e091126f..92c6b3c3a571 100644 --- a/fs/ceph/crush/crush.h +++ b/fs/ceph/crush/crush.h @@ -97,16 +97,7 @@ enum { CRUSH_BUCKET_TREE = 3, CRUSH_BUCKET_STRAW = 4 }; -static inline const char *crush_bucket_alg_name(int alg) -{ - switch (alg) { - case CRUSH_BUCKET_UNIFORM: return "uniform"; - case CRUSH_BUCKET_LIST: return "list"; - case CRUSH_BUCKET_TREE: return "tree"; - case CRUSH_BUCKET_STRAW: return "straw"; - default: return "unknown"; - } -} +extern const char *crush_bucket_alg_name(int alg); struct crush_bucket { __s32 id; /* this'll be negative */ diff --git a/fs/ceph/crush/hash.c b/fs/ceph/crush/hash.c new file mode 100644 index 000000000000..b438c5d27816 --- /dev/null +++ b/fs/ceph/crush/hash.c @@ -0,0 +1,86 @@ + +#include + +/* + * Robert Jenkins' function for mixing 32-bit values + * http://burtleburtle.net/bob/hash/evahash.html + * a, b = random bits, c = input and output + */ +#define crush_hashmix(a, b, c) do { \ + a = a-b; a = a-c; a = a^(c>>13); \ + b = b-c; b = b-a; b = b^(a<<8); \ + c = c-a; c = c-b; c = c^(b>>13); \ + a = a-b; a = a-c; a = a^(c>>12); \ + b = b-c; b = b-a; b = b^(a<<16); \ + c = c-a; c = c-b; c = c^(b>>5); \ + a = a-b; a = a-c; a = a^(c>>3); \ + b = b-c; b = b-a; b = b^(a<<10); \ + c = c-a; c = c-b; c = c^(b>>15); \ + } while (0) + +#define crush_hash_seed 1315423911 + +__u32 crush_hash32(__u32 a) +{ + __u32 hash = crush_hash_seed ^ a; + __u32 b = a; + __u32 x = 231232; + __u32 y = 1232; + crush_hashmix(b, x, hash); + crush_hashmix(y, a, hash); + return hash; +} + +__u32 crush_hash32_2(__u32 a, __u32 b) +{ + __u32 hash = crush_hash_seed ^ a ^ b; + __u32 x = 231232; + __u32 y = 1232; + crush_hashmix(a, b, hash); + crush_hashmix(x, a, hash); + crush_hashmix(b, y, hash); + 
return hash; +} + +__u32 crush_hash32_3(__u32 a, __u32 b, __u32 c) +{ + __u32 hash = crush_hash_seed ^ a ^ b ^ c; + __u32 x = 231232; + __u32 y = 1232; + crush_hashmix(a, b, hash); + crush_hashmix(c, x, hash); + crush_hashmix(y, a, hash); + crush_hashmix(b, x, hash); + crush_hashmix(y, c, hash); + return hash; +} + +__u32 crush_hash32_4(__u32 a, __u32 b, __u32 c, __u32 d) +{ + __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d; + __u32 x = 231232; + __u32 y = 1232; + crush_hashmix(a, b, hash); + crush_hashmix(c, d, hash); + crush_hashmix(a, x, hash); + crush_hashmix(y, b, hash); + crush_hashmix(c, x, hash); + crush_hashmix(y, d, hash); + return hash; +} + +__u32 crush_hash32_5(__u32 a, __u32 b, __u32 c, __u32 d, __u32 e) +{ + __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d ^ e; + __u32 x = 231232; + __u32 y = 1232; + crush_hashmix(a, b, hash); + crush_hashmix(c, d, hash); + crush_hashmix(e, x, hash); + crush_hashmix(y, a, hash); + crush_hashmix(b, x, hash); + crush_hashmix(y, c, hash); + crush_hashmix(d, x, hash); + crush_hashmix(y, e, hash); + return hash; +} diff --git a/fs/ceph/crush/hash.h b/fs/ceph/crush/hash.h index 42f3312783a1..9ce89f85dc7d 100644 --- a/fs/ceph/crush/hash.h +++ b/fs/ceph/crush/hash.h @@ -1,90 +1,12 @@ #ifndef _CRUSH_HASH_H #define _CRUSH_HASH_H -/* - * Robert Jenkins' function for mixing 32-bit values - * http://burtleburtle.net/bob/hash/evahash.html - * a, b = random bits, c = input and output - */ -#define crush_hashmix(a, b, c) do { \ - a = a-b; a = a-c; a = a^(c>>13); \ - b = b-c; b = b-a; b = b^(a<<8); \ - c = c-a; c = c-b; c = c^(b>>13); \ - a = a-b; a = a-c; a = a^(c>>12); \ - b = b-c; b = b-a; b = b^(a<<16); \ - c = c-a; c = c-b; c = c^(b>>5); \ - a = a-b; a = a-c; a = a^(c>>3); \ - b = b-c; b = b-a; b = b^(a<<10); \ - c = c-a; c = c-b; c = c^(b>>15); \ - } while (0) - -#define crush_hash_seed 1315423911 - -static inline __u32 crush_hash32(__u32 a) -{ - __u32 hash = crush_hash_seed ^ a; - __u32 b = a; - __u32 x = 231232; - __u32 y = 1232; - crush_hashmix(b, x, hash); - crush_hashmix(y, a, hash); - return hash; -} - -static inline __u32 crush_hash32_2(__u32 a, __u32 b) -{ - __u32 hash = crush_hash_seed ^ a ^ b; - __u32 x = 231232; - __u32 y = 1232; - crush_hashmix(a, b, hash); - crush_hashmix(x, a, hash); - crush_hashmix(b, y, hash); - return hash; -} - -static inline __u32 crush_hash32_3(__u32 a, __u32 b, __u32 c) -{ - __u32 hash = crush_hash_seed ^ a ^ b ^ c; - __u32 x = 231232; - __u32 y = 1232; - crush_hashmix(a, b, hash); - crush_hashmix(c, x, hash); - crush_hashmix(y, a, hash); - crush_hashmix(b, x, hash); - crush_hashmix(y, c, hash); - return hash; -} - -static inline __u32 crush_hash32_4(__u32 a, __u32 b, __u32 c, - __u32 d) -{ - __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d; - __u32 x = 231232; - __u32 y = 1232; - crush_hashmix(a, b, hash); - crush_hashmix(c, d, hash); - crush_hashmix(a, x, hash); - crush_hashmix(y, b, hash); - crush_hashmix(c, x, hash); - crush_hashmix(y, d, hash); - return hash; -} - -static inline __u32 crush_hash32_5(__u32 a, __u32 b, __u32 c, - __u32 d, __u32 e) -{ - __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d ^ e; - __u32 x = 231232; - __u32 y = 1232; - crush_hashmix(a, b, hash); - crush_hashmix(c, d, hash); - crush_hashmix(e, x, hash); - crush_hashmix(y, a, hash); - crush_hashmix(b, x, hash); - crush_hashmix(y, c, hash); - crush_hashmix(d, x, hash); - crush_hashmix(y, e, hash); - return hash; -} +extern __u32 crush_hash32(__u32 a); +extern __u32 crush_hash32_2(__u32 a, __u32 b); +extern __u32 crush_hash32_3(__u32 a, __u32 b, __u32 c); 
+extern __u32 crush_hash32_4(__u32 a, __u32 b, __u32 c, + __u32 d); +extern __u32 crush_hash32_5(__u32 a, __u32 b, __u32 c, + __u32 d, __u32 e); #endif -- cgit v1.2.3 From cfbbcd24a6bfd794295ee7ad76dfbff40ad6b934 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 6 Nov 2009 16:44:05 -0800 Subject: ceph: use strong hash function for mapping objects to pgs We were using the (weak) dcache hash function, but it was leaving lower bits consecutive for consecutive (inode) objects. We really want to make the object to pg mapping random and uniform, so use a proper hash function here. This is Robert Jenkin's public domain hash function (with some minor cleanup): http://burtleburtle.net/bob/hash/evahash.html This is a protocol revision. Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.c | 93 +++++++++++++++++++++++++++++++++++++++++-------------- fs/ceph/ceph_fs.h | 2 +- 2 files changed, 71 insertions(+), 24 deletions(-) diff --git a/fs/ceph/ceph_fs.c b/fs/ceph/ceph_fs.c index a950b4083577..b3ecf1b07521 100644 --- a/fs/ceph/ceph_fs.c +++ b/fs/ceph/ceph_fs.c @@ -73,32 +73,79 @@ int ceph_caps_for_mode(int mode) return 0; } -/* Name hashing routines. Initial hash value */ -/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */ -#define ceph_init_name_hash() 0 - -/* partial hash update function. Assume roughly 4 bits per character */ -static unsigned long ceph_partial_name_hash(unsigned long c, - unsigned long prevhash) -{ - return (prevhash + (c << 4) + (c >> 4)) * 11; -} - /* - * Finally: cut down the number of bits to a int value (and try to avoid - * losing bits) + * Robert Jenkin's hash function. + * http://burtleburtle.net/bob/hash/evahash.html + * This is in the public domain. */ -static unsigned long ceph_end_name_hash(unsigned long hash) -{ - return hash & 0xffffffff; -} +#define mix(a, b, c) \ + do { \ + a = a - b; a = a - c; a = a ^ (c >> 13); \ + b = b - c; b = b - a; b = b ^ (a << 8); \ + c = c - a; c = c - b; c = c ^ (b >> 13); \ + a = a - b; a = a - c; a = a ^ (c >> 12); \ + b = b - c; b = b - a; b = b ^ (a << 16); \ + c = c - a; c = c - b; c = c ^ (b >> 5); \ + a = a - b; a = a - c; a = a ^ (c >> 3); \ + b = b - c; b = b - a; b = b ^ (a << 10); \ + c = c - a; c = c - b; c = c ^ (b >> 15); \ + } while (0) -/* Compute the hash for a name string. 
*/ -unsigned int ceph_full_name_hash(const char *name, unsigned int len) +unsigned int ceph_full_name_hash(const char *str, unsigned int length) { - unsigned long hash = ceph_init_name_hash(); - while (len--) - hash = ceph_partial_name_hash(*name++, hash); - return ceph_end_name_hash(hash); + const unsigned char *k = (const unsigned char *)str; + __u32 a, b, c; /* the internal state */ + __u32 len; /* how many key bytes still need mixing */ + + /* Set up the internal state */ + len = length; + a = 0x9e3779b9; /* the golden ratio; an arbitrary value */ + b = a; + c = 0; /* variable initialization of internal state */ + + /* handle most of the key */ + while (len >= 12) { + a = a + (k[0] + ((__u32)k[1] << 8) + ((__u32)k[2] << 16) + + ((__u32)k[3] << 24)); + b = b + (k[4] + ((__u32)k[5] << 8) + ((__u32)k[6] << 16) + + ((__u32)k[7] << 24)); + c = c + (k[8] + ((__u32)k[9] << 8) + ((__u32)k[10] << 16) + + ((__u32)k[11] << 24)); + mix(a, b, c); + k = k + 12; + len = len - 12; + } + + /* handle the last 11 bytes */ + c = c + length; + switch (len) { /* all the case statements fall through */ + case 11: + c = c + ((__u32)k[10] << 24); + case 10: + c = c + ((__u32)k[9] << 16); + case 9: + c = c + ((__u32)k[8] << 8); + /* the first byte of c is reserved for the length */ + case 8: + b = b + ((__u32)k[7] << 24); + case 7: + b = b + ((__u32)k[6] << 16); + case 6: + b = b + ((__u32)k[5] << 8); + case 5: + b = b + k[4]; + case 4: + a = a + ((__u32)k[3] << 24); + case 3: + a = a + ((__u32)k[2] << 16); + case 2: + a = a + ((__u32)k[1] << 8); + case 1: + a = a + k[0]; + /* case 0: nothing left to add */ + } + mix(a, b, c); + + return c; } diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index ae523828c538..25fc537f4140 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -38,7 +38,7 @@ #define CEPH_OSD_PROTOCOL 7 /* cluster internal */ #define CEPH_MDS_PROTOCOL 9 /* cluster internal */ #define CEPH_MON_PROTOCOL 5 /* cluster internal */ -#define CEPH_OSDC_PROTOCOL 21 /* server/client */ +#define CEPH_OSDC_PROTOCOL 22 /* server/client */ #define CEPH_MDSC_PROTOCOL 29 /* server/client */ #define CEPH_MONC_PROTOCOL 15 /* server/client */ -- cgit v1.2.3 From 1654dd0cf5ee1827322aca156af7d96d757201c7 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 6 Nov 2009 21:55:25 -0800 Subject: ceph: make object hash a pg_pool property The object will be hashed to a placement seed (ps) based on the pg_pool's hash function. This allows new hashes to be introduced into an existing object store, or selection of a hash appropriate to the objects that will be stored in a particular pool. 
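Illustrative aside, not part of the original patch: a rough standalone sketch of the idea that the pool records which name hash to use, and the placement seed (ps) is computed through that choice. The names example_pool, example_ps and the stub hash bodies are invented for this sketch; the real constants and dispatcher are in the ceph_hash.[ch] files added below.

#include <stdio.h>
#include <string.h>

/* hash type ids mirroring CEPH_STR_HASH_LINUX / CEPH_STR_HASH_RJENKINS */
#define EXAMPLE_HASH_LINUX    0x1
#define EXAMPLE_HASH_RJENKINS 0x2

/* old dcache-style hash, as kept for existing pools */
static unsigned example_hash_linux(const char *s, unsigned len)
{
        unsigned long hash = 0;
        while (len-- > 0) {
                unsigned char c = *s++;
                hash = (hash + (c << 4) + (c >> 4)) * 11;
        }
        return (unsigned)hash;
}

/* stand-in for the Jenkins hash; the real mixing is in ceph_hash.c below */
static unsigned example_hash_rjenkins(const char *s, unsigned len)
{
        unsigned hash = 0x9e3779b9;
        while (len-- > 0)
                hash = (hash ^ (unsigned char)*s++) * 16777619u;
        return hash;
}

struct example_pool {
        unsigned char object_hash;      /* per-pool choice, as in ceph_pg_pool */
};

/* object name -> placement seed, using whichever hash the pool asks for */
static unsigned example_ps(const struct example_pool *pool, const char *oid)
{
        if (pool->object_hash == EXAMPLE_HASH_LINUX)
                return example_hash_linux(oid, strlen(oid));
        return example_hash_rjenkins(oid, strlen(oid));
}

int main(void)
{
        struct example_pool pool = { .object_hash = EXAMPLE_HASH_RJENKINS };

        printf("ps = 0x%x\n", example_ps(&pool, "10000000000.00000000"));
        return 0;
}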
Signed-off-by: Sage Weil --- fs/ceph/Makefile | 2 +- fs/ceph/README | 2 + fs/ceph/ceph_fs.c | 77 ---------------------------------- fs/ceph/ceph_fs.h | 2 - fs/ceph/ceph_hash.c | 118 ++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/ceph_hash.h | 13 ++++++ fs/ceph/osdmap.c | 2 +- fs/ceph/rados.h | 1 + fs/ceph/types.h | 1 + 9 files changed, 137 insertions(+), 81 deletions(-) create mode 100644 fs/ceph/ceph_hash.c create mode 100644 fs/ceph/ceph_hash.h diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile index 8bad70aac363..bdd3e6fc1609 100644 --- a/fs/ceph/Makefile +++ b/fs/ceph/Makefile @@ -13,7 +13,7 @@ ceph-objs := super.o inode.o dir.o file.o addr.o ioctl.o \ mon_client.o \ osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \ debugfs.o \ - ceph_fs.o ceph_strings.o ceph_frag.o + ceph_fs.o ceph_strings.o ceph_hash.o ceph_frag.o else #Otherwise we were called directly from the command diff --git a/fs/ceph/README b/fs/ceph/README index 660e00074d59..18352fab37c0 100644 --- a/fs/ceph/README +++ b/fs/ceph/README @@ -10,6 +10,8 @@ src/include/rados.h fs/ceph/rados.h src/include/ceph_strings.cc fs/ceph/ceph_strings.c src/include/ceph_frag.h fs/ceph/ceph_frag.h src/include/ceph_frag.cc fs/ceph/ceph_frag.c +src/include/ceph_hash.h fs/ceph/ceph_hash.h +src/include/ceph_hash.cc fs/ceph/ceph_hash.c src/crush/crush.c fs/ceph/crush/crush.c src/crush/crush.h fs/ceph/crush/crush.h src/crush/mapper.c fs/ceph/crush/mapper.c diff --git a/fs/ceph/ceph_fs.c b/fs/ceph/ceph_fs.c index b3ecf1b07521..79d76bc4303f 100644 --- a/fs/ceph/ceph_fs.c +++ b/fs/ceph/ceph_fs.c @@ -72,80 +72,3 @@ int ceph_caps_for_mode(int mode) } return 0; } - -/* - * Robert Jenkin's hash function. - * http://burtleburtle.net/bob/hash/evahash.html - * This is in the public domain. 
- */ -#define mix(a, b, c) \ - do { \ - a = a - b; a = a - c; a = a ^ (c >> 13); \ - b = b - c; b = b - a; b = b ^ (a << 8); \ - c = c - a; c = c - b; c = c ^ (b >> 13); \ - a = a - b; a = a - c; a = a ^ (c >> 12); \ - b = b - c; b = b - a; b = b ^ (a << 16); \ - c = c - a; c = c - b; c = c ^ (b >> 5); \ - a = a - b; a = a - c; a = a ^ (c >> 3); \ - b = b - c; b = b - a; b = b ^ (a << 10); \ - c = c - a; c = c - b; c = c ^ (b >> 15); \ - } while (0) - -unsigned int ceph_full_name_hash(const char *str, unsigned int length) -{ - const unsigned char *k = (const unsigned char *)str; - __u32 a, b, c; /* the internal state */ - __u32 len; /* how many key bytes still need mixing */ - - /* Set up the internal state */ - len = length; - a = 0x9e3779b9; /* the golden ratio; an arbitrary value */ - b = a; - c = 0; /* variable initialization of internal state */ - - /* handle most of the key */ - while (len >= 12) { - a = a + (k[0] + ((__u32)k[1] << 8) + ((__u32)k[2] << 16) + - ((__u32)k[3] << 24)); - b = b + (k[4] + ((__u32)k[5] << 8) + ((__u32)k[6] << 16) + - ((__u32)k[7] << 24)); - c = c + (k[8] + ((__u32)k[9] << 8) + ((__u32)k[10] << 16) + - ((__u32)k[11] << 24)); - mix(a, b, c); - k = k + 12; - len = len - 12; - } - - /* handle the last 11 bytes */ - c = c + length; - switch (len) { /* all the case statements fall through */ - case 11: - c = c + ((__u32)k[10] << 24); - case 10: - c = c + ((__u32)k[9] << 16); - case 9: - c = c + ((__u32)k[8] << 8); - /* the first byte of c is reserved for the length */ - case 8: - b = b + ((__u32)k[7] << 24); - case 7: - b = b + ((__u32)k[6] << 16); - case 6: - b = b + ((__u32)k[5] << 8); - case 5: - b = b + k[4]; - case 4: - a = a + ((__u32)k[3] << 24); - case 3: - a = a + ((__u32)k[2] << 16); - case 2: - a = a + ((__u32)k[1] << 8); - case 1: - a = a + k[0]; - /* case 0: nothing left to add */ - } - mix(a, b, c); - - return c; -} - diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index 25fc537f4140..36becb024788 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -49,8 +49,6 @@ #define CEPH_MAX_MON 31 -unsigned int ceph_full_name_hash(const char *name, unsigned int len); - /* * ceph_file_layout - describe data layout for a file/inode diff --git a/fs/ceph/ceph_hash.c b/fs/ceph/ceph_hash.c new file mode 100644 index 000000000000..ac8be54631fe --- /dev/null +++ b/fs/ceph/ceph_hash.c @@ -0,0 +1,118 @@ + +#include "types.h" + +/* + * Robert Jenkin's hash function. + * http://burtleburtle.net/bob/hash/evahash.html + * This is in the public domain. 
+ */ +#define mix(a, b, c) \ + do { \ + a = a - b; a = a - c; a = a ^ (c >> 13); \ + b = b - c; b = b - a; b = b ^ (a << 8); \ + c = c - a; c = c - b; c = c ^ (b >> 13); \ + a = a - b; a = a - c; a = a ^ (c >> 12); \ + b = b - c; b = b - a; b = b ^ (a << 16); \ + c = c - a; c = c - b; c = c ^ (b >> 5); \ + a = a - b; a = a - c; a = a ^ (c >> 3); \ + b = b - c; b = b - a; b = b ^ (a << 10); \ + c = c - a; c = c - b; c = c ^ (b >> 15); \ + } while (0) + +unsigned ceph_str_hash_rjenkins(const char *str, unsigned length) +{ + const unsigned char *k = (const unsigned char *)str; + __u32 a, b, c; /* the internal state */ + __u32 len; /* how many key bytes still need mixing */ + + /* Set up the internal state */ + len = length; + a = 0x9e3779b9; /* the golden ratio; an arbitrary value */ + b = a; + c = 0; /* variable initialization of internal state */ + + /* handle most of the key */ + while (len >= 12) { + a = a + (k[0] + ((__u32)k[1] << 8) + ((__u32)k[2] << 16) + + ((__u32)k[3] << 24)); + b = b + (k[4] + ((__u32)k[5] << 8) + ((__u32)k[6] << 16) + + ((__u32)k[7] << 24)); + c = c + (k[8] + ((__u32)k[9] << 8) + ((__u32)k[10] << 16) + + ((__u32)k[11] << 24)); + mix(a, b, c); + k = k + 12; + len = len - 12; + } + + /* handle the last 11 bytes */ + c = c + length; + switch (len) { /* all the case statements fall through */ + case 11: + c = c + ((__u32)k[10] << 24); + case 10: + c = c + ((__u32)k[9] << 16); + case 9: + c = c + ((__u32)k[8] << 8); + /* the first byte of c is reserved for the length */ + case 8: + b = b + ((__u32)k[7] << 24); + case 7: + b = b + ((__u32)k[6] << 16); + case 6: + b = b + ((__u32)k[5] << 8); + case 5: + b = b + k[4]; + case 4: + a = a + ((__u32)k[3] << 24); + case 3: + a = a + ((__u32)k[2] << 16); + case 2: + a = a + ((__u32)k[1] << 8); + case 1: + a = a + k[0]; + /* case 0: nothing left to add */ + } + mix(a, b, c); + + return c; +} + +/* + * linux dcache hash + */ +unsigned ceph_str_hash_linux(const char *str, unsigned length) +{ + unsigned long hash = 0; + unsigned char c; + + while (length-- > 0) { + c = *str++; + hash = (hash + (c << 4) + (c >> 4)) * 11; + } + return hash; +} + + +unsigned ceph_str_hash(int type, const char *s, unsigned len) +{ + switch (type) { + case CEPH_STR_HASH_LINUX: + return ceph_str_hash_linux(s, len); + case CEPH_STR_HASH_RJENKINS: + return ceph_str_hash_rjenkins(s, len); + default: + return -1; + } +} + +const char *ceph_str_hash_name(int type) +{ + switch (type) { + case CEPH_STR_HASH_LINUX: + return "linux"; + case CEPH_STR_HASH_RJENKINS: + return "rjenkins"; + default: + return "unknown"; + } +} diff --git a/fs/ceph/ceph_hash.h b/fs/ceph/ceph_hash.h new file mode 100644 index 000000000000..5ac470c433c9 --- /dev/null +++ b/fs/ceph/ceph_hash.h @@ -0,0 +1,13 @@ +#ifndef _FS_CEPH_HASH_H +#define _FS_CEPH_HASH_H + +#define CEPH_STR_HASH_LINUX 0x1 /* linux dcache hash */ +#define CEPH_STR_HASH_RJENKINS 0x2 /* robert jenkins' */ + +extern unsigned ceph_str_hash_linux(const char *s, unsigned len); +extern unsigned ceph_str_hash_rjenkins(const char *s, unsigned len); + +extern unsigned ceph_str_hash(int type, const char *s, unsigned len); +extern const char *ceph_str_hash_name(int type); + +#endif diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index a025555b70af..68478270ad70 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -809,7 +809,7 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol, return -EIO; pool = &osdmap->pg_pool[poolid]; - ps = ceph_full_name_hash(oid, strlen(oid)); + ps = ceph_str_hash(pool->v.object_hash, oid, 
strlen(oid)); if (preferred >= 0) { ps += preferred; num = le32_to_cpu(pool->v.lpg_num); diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h index 85bdef78d142..fb23ff9297c9 100644 --- a/fs/ceph/rados.h +++ b/fs/ceph/rados.h @@ -84,6 +84,7 @@ struct ceph_pg_pool { __u8 type; /* CEPH_PG_TYPE_* */ __u8 size; /* number of osds in each pg */ __u8 crush_ruleset; /* crush placement rule */ + __u8 object_hash; /* hash mapping object name to ps */ __le32 pg_num, pgp_num; /* number of pg's */ __le32 lpg_num, lpgp_num; /* number of localized pg's */ __le32 last_change; /* most recent epoch changed */ diff --git a/fs/ceph/types.h b/fs/ceph/types.h index 8a514568cab2..28b35a005ec2 100644 --- a/fs/ceph/types.h +++ b/fs/ceph/types.h @@ -9,6 +9,7 @@ #include "ceph_fs.h" #include "ceph_frag.h" +#include "ceph_hash.h" /* * Identify inodes by both their ino AND snapshot id (a u64). -- cgit v1.2.3 From fb690390e305ea51e1883b105c7d3c52d7100ba5 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Sat, 7 Nov 2009 20:18:22 -0800 Subject: ceph: make CRUSH hash function a bucket property Make the integer hash function a property of the bucket it is used on. This allows us to gracefully add support for new hash functions without starting from scatch. Signed-off-by: Sage Weil --- fs/ceph/crush/crush.h | 3 ++- fs/ceph/crush/hash.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++---- fs/ceph/crush/hash.h | 19 ++++++++----- fs/ceph/crush/mapper.c | 16 ++++++----- fs/ceph/osdmap.c | 3 ++- 5 files changed, 93 insertions(+), 21 deletions(-) diff --git a/fs/ceph/crush/crush.h b/fs/ceph/crush/crush.h index 92c6b3c3a571..dcd7e7523700 100644 --- a/fs/ceph/crush/crush.h +++ b/fs/ceph/crush/crush.h @@ -102,7 +102,8 @@ extern const char *crush_bucket_alg_name(int alg); struct crush_bucket { __s32 id; /* this'll be negative */ __u16 type; /* non-zero; type=0 is reserved for devices */ - __u16 alg; /* one of CRUSH_BUCKET_* */ + __u8 alg; /* one of CRUSH_BUCKET_* */ + __u8 hash; /* which hash function to use, CRUSH_HASH_* */ __u32 weight; /* 16-bit fixed point */ __u32 size; /* num items */ __s32 *items; diff --git a/fs/ceph/crush/hash.c b/fs/ceph/crush/hash.c index b438c5d27816..5873aed694bf 100644 --- a/fs/ceph/crush/hash.c +++ b/fs/ceph/crush/hash.c @@ -1,5 +1,6 @@ #include +#include "hash.h" /* * Robert Jenkins' function for mixing 32-bit values @@ -20,7 +21,7 @@ #define crush_hash_seed 1315423911 -__u32 crush_hash32(__u32 a) +static __u32 crush_hash32_rjenkins1(__u32 a) { __u32 hash = crush_hash_seed ^ a; __u32 b = a; @@ -31,7 +32,7 @@ __u32 crush_hash32(__u32 a) return hash; } -__u32 crush_hash32_2(__u32 a, __u32 b) +static __u32 crush_hash32_rjenkins1_2(__u32 a, __u32 b) { __u32 hash = crush_hash_seed ^ a ^ b; __u32 x = 231232; @@ -42,7 +43,7 @@ __u32 crush_hash32_2(__u32 a, __u32 b) return hash; } -__u32 crush_hash32_3(__u32 a, __u32 b, __u32 c) +static __u32 crush_hash32_rjenkins1_3(__u32 a, __u32 b, __u32 c) { __u32 hash = crush_hash_seed ^ a ^ b ^ c; __u32 x = 231232; @@ -55,7 +56,7 @@ __u32 crush_hash32_3(__u32 a, __u32 b, __u32 c) return hash; } -__u32 crush_hash32_4(__u32 a, __u32 b, __u32 c, __u32 d) +static __u32 crush_hash32_rjenkins1_4(__u32 a, __u32 b, __u32 c, __u32 d) { __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d; __u32 x = 231232; @@ -69,7 +70,8 @@ __u32 crush_hash32_4(__u32 a, __u32 b, __u32 c, __u32 d) return hash; } -__u32 crush_hash32_5(__u32 a, __u32 b, __u32 c, __u32 d, __u32 e) +static __u32 crush_hash32_rjenkins1_5(__u32 a, __u32 b, __u32 c, __u32 d, + __u32 e) { __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d ^ 
e; __u32 x = 231232; @@ -84,3 +86,64 @@ __u32 crush_hash32_5(__u32 a, __u32 b, __u32 c, __u32 d, __u32 e) crush_hashmix(y, e, hash); return hash; } + + +__u32 crush_hash32(int type, __u32 a) +{ + switch (type) { + case CRUSH_HASH_RJENKINS1: + return crush_hash32_rjenkins1(a); + default: + return 0; + } +} + +__u32 crush_hash32_2(int type, __u32 a, __u32 b) +{ + switch (type) { + case CRUSH_HASH_RJENKINS1: + return crush_hash32_rjenkins1_2(a, b); + default: + return 0; + } +} + +__u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c) +{ + switch (type) { + case CRUSH_HASH_RJENKINS1: + return crush_hash32_rjenkins1_3(a, b, c); + default: + return 0; + } +} + +__u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d) +{ + switch (type) { + case CRUSH_HASH_RJENKINS1: + return crush_hash32_rjenkins1_4(a, b, c, d); + default: + return 0; + } +} + +__u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, __u32 e) +{ + switch (type) { + case CRUSH_HASH_RJENKINS1: + return crush_hash32_rjenkins1_5(a, b, c, d, e); + default: + return 0; + } +} + +const char *crush_hash_name(int type) +{ + switch (type) { + case CRUSH_HASH_RJENKINS1: + return "rjenkins1"; + default: + return "unknown"; + } +} diff --git a/fs/ceph/crush/hash.h b/fs/ceph/crush/hash.h index 9ce89f85dc7d..ff48e110e4bb 100644 --- a/fs/ceph/crush/hash.h +++ b/fs/ceph/crush/hash.h @@ -1,12 +1,17 @@ #ifndef _CRUSH_HASH_H #define _CRUSH_HASH_H -extern __u32 crush_hash32(__u32 a); -extern __u32 crush_hash32_2(__u32 a, __u32 b); -extern __u32 crush_hash32_3(__u32 a, __u32 b, __u32 c); -extern __u32 crush_hash32_4(__u32 a, __u32 b, __u32 c, - __u32 d); -extern __u32 crush_hash32_5(__u32 a, __u32 b, __u32 c, - __u32 d, __u32 e); +#define CRUSH_HASH_RJENKINS1 0 + +#define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1 + +extern const char *crush_hash_name(int type); + +extern __u32 crush_hash32(int type, __u32 a); +extern __u32 crush_hash32_2(int type, __u32 a, __u32 b); +extern __u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c); +extern __u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d); +extern __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, + __u32 e); #endif diff --git a/fs/ceph/crush/mapper.c b/fs/ceph/crush/mapper.c index 54f3f402af60..2523d448445c 100644 --- a/fs/ceph/crush/mapper.c +++ b/fs/ceph/crush/mapper.c @@ -78,7 +78,7 @@ static int bucket_perm_choose(struct crush_bucket *bucket, /* optimize common r=0 case */ if (pr == 0) { - s = crush_hash32_3(x, bucket->id, 0) % + s = crush_hash32_3(bucket->hash, x, bucket->id, 0) % bucket->size; bucket->perm[0] = s; bucket->perm_n = 0xffff; /* magic value, see below */ @@ -103,7 +103,7 @@ static int bucket_perm_choose(struct crush_bucket *bucket, unsigned p = bucket->perm_n; /* no point in swapping the final entry */ if (p < bucket->size - 1) { - i = crush_hash32_3(x, bucket->id, p) % + i = crush_hash32_3(bucket->hash, x, bucket->id, p) % (bucket->size - p); if (i) { unsigned t = bucket->perm[p + i]; @@ -138,8 +138,8 @@ static int bucket_list_choose(struct crush_bucket_list *bucket, int i; for (i = bucket->h.size-1; i >= 0; i--) { - __u64 w = crush_hash32_4(x, bucket->h.items[i], r, - bucket->h.id); + __u64 w = crush_hash32_4(bucket->h.hash,x, bucket->h.items[i], + r, bucket->h.id); w &= 0xffff; dprintk("list_choose i=%d x=%d r=%d item %d weight %x " "sw %x rand %llx", @@ -198,7 +198,8 @@ static int bucket_tree_choose(struct crush_bucket_tree *bucket, while (!terminal(n)) { /* pick point in [0, w) */ w = bucket->node_weights[n]; - t = 
(__u64)crush_hash32_4(x, n, r, bucket->h.id) * (__u64)w; + t = (__u64)crush_hash32_4(bucket->h.hash, x, n, r, + bucket->h.id) * (__u64)w; t = t >> 32; /* descend to the left or right? */ @@ -224,7 +225,7 @@ static int bucket_straw_choose(struct crush_bucket_straw *bucket, __u64 draw; for (i = 0; i < bucket->h.size; i++) { - draw = crush_hash32_3(x, bucket->h.items[i], r); + draw = crush_hash32_3(bucket->h.hash, x, bucket->h.items[i], r); draw &= 0xffff; draw *= bucket->straws[i]; if (i == 0 || draw > high_draw) { @@ -267,7 +268,8 @@ static int is_out(struct crush_map *map, __u32 *weight, int item, int x) return 0; if (weight[item] == 0) return 1; - if ((crush_hash32_2(x, item) & 0xffff) < weight[item]) + if ((crush_hash32_2(CRUSH_HASH_RJENKINS1, x, item) & 0xffff) + < weight[item]) return 0; return 1; } diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 68478270ad70..8c994c714781 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -210,7 +210,8 @@ static struct crush_map *crush_decode(void *pbyval, void *end) ceph_decode_need(p, end, 4*sizeof(u32), bad); b->id = ceph_decode_32(p); b->type = ceph_decode_16(p); - b->alg = ceph_decode_16(p); + b->alg = ceph_decode_8(p); + b->hash = ceph_decode_8(p); b->weight = ceph_decode_32(p); b->size = ceph_decode_32(p); -- cgit v1.2.3 From 685f9a5d14194fc35db73e5e7370740ccc14b64a Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 9 Nov 2009 12:05:48 -0800 Subject: ceph: do not confuse stale and dead (unreconnected) caps We were using the cap_gen to track both stale caps (caps that timed out due to temporarily losing touch with the mds) and dead caps that did not reconnect after an MDS failure. Introduce a recon_gen counter to track reconnections to restarted MDSs and kill dead caps based on that instead. Rename gen to cap_gen while we're at it to make it more clear which is which. 
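In other words, each cap now carries two generation numbers that are
checked against its session: cap_gen catches the stale case (the session
lease timed out) and recon_gen catches the dead case (the MDS restarted
and we reconnected).  A rough sketch of the resulting validity test, for
illustration only (field names follow this patch; see __cap_is_valid in
the diff for the real code):

    static int cap_is_valid_sketch(struct ceph_cap *cap)
    {
            unsigned long ttl;
            u32 gen, recon_gen;

            spin_lock(&cap->session->s_cap_lock);
            gen = cap->session->s_cap_gen;
            recon_gen = cap->session->s_recon_gen;
            ttl = cap->session->s_cap_ttl;
            spin_unlock(&cap->session->s_cap_lock);

            if (cap->recon_gen != recon_gen)
                    return 0;   /* dead: issued before the MDS restart */
            if (cap->cap_gen < gen || time_after_eq(jiffies, ttl))
                    return 0;   /* stale: session lease has lapsed */
            return 1;           /* cap is still usable */
    }
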
Signed-off-by: Sage Weil --- fs/ceph/caps.c | 20 +++++++++++++++----- fs/ceph/mds_client.c | 9 ++++++--- fs/ceph/mds_client.h | 2 ++ fs/ceph/super.h | 4 +++- 4 files changed, 26 insertions(+), 9 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 8b863dbec70c..775e6f6fc970 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -609,7 +609,8 @@ retry: cap->seq = seq; cap->issue_seq = seq; cap->mseq = mseq; - cap->gen = session->s_cap_gen; + cap->cap_gen = session->s_cap_gen; + cap->recon_gen = session->s_recon_gen; if (fmode >= 0) __ceph_get_fmode(ci, fmode); @@ -626,17 +627,25 @@ retry: static int __cap_is_valid(struct ceph_cap *cap) { unsigned long ttl; - u32 gen; + u32 gen, recon_gen; spin_lock(&cap->session->s_cap_lock); gen = cap->session->s_cap_gen; + recon_gen = cap->session->s_recon_gen; ttl = cap->session->s_cap_ttl; spin_unlock(&cap->session->s_cap_lock); - if (cap->gen < gen || time_after_eq(jiffies, ttl)) { + if (cap->recon_gen != recon_gen) { + dout("__cap_is_valid %p cap %p issued %s " + "but DEAD (recon_gen %u vs %u)\n", &cap->ci->vfs_inode, + cap, ceph_cap_string(cap->issued), cap->recon_gen, + recon_gen); + return 0; + } + if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) { dout("__cap_is_valid %p cap %p issued %s " "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode, - cap, ceph_cap_string(cap->issued), cap->gen, gen); + cap, ceph_cap_string(cap->issued), cap->cap_gen, gen); return 0; } @@ -2203,7 +2212,8 @@ restart: issued = __ceph_caps_issued(ci, &implemented); issued |= implemented | __ceph_caps_dirty(ci); - cap->gen = session->s_cap_gen; + cap->cap_gen = session->s_cap_gen; + cap->recon_gen = session->s_recon_gen; __check_cap_issue(ci, cap, newcaps); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 210cb6623ea2..828417ae16f9 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -329,6 +329,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); spin_lock_init(&s->s_cap_lock); + s->s_recon_gen = 0; s->s_cap_gen = 0; s->s_cap_ttl = 0; s->s_renew_requested = 0; @@ -738,10 +739,11 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap, struct ceph_mds_session *session = arg; spin_lock(&inode->i_lock); - if (cap->gen != session->s_cap_gen) { + if (cap->recon_gen != session->s_recon_gen) { pr_err("failed reconnect %p %llx.%llx cap %p " - "(gen %d < session %d)\n", inode, ceph_vinop(inode), - cap, cap->gen, session->s_cap_gen); + "(recon_gen %d < session %d)\n", inode, + ceph_vinop(inode), cap, + cap->recon_gen, session->s_recon_gen); __ceph_remove_cap(cap, NULL); } wake_up(&ceph_inode(inode)->i_cap_wq); @@ -2050,6 +2052,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds) session->s_state = CEPH_MDS_SESSION_RECONNECTING; session->s_seq = 0; + session->s_recon_gen++; ceph_con_open(&session->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index f566e9c84295..c0846b1c482b 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -98,6 +98,8 @@ struct ceph_mds_session { u64 s_seq; /* incoming msg seq # */ struct mutex s_mutex; /* serialize session messages */ + int s_recon_gen; /* inc on reconnect to recovered mds */ + struct ceph_connection s_con; /* protected by s_cap_lock */ diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 05947b96c524..25793559a2e5 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -169,7 +169,9 @@ struct ceph_cap { 
int issued; /* latest, from the mds */ int implemented; /* implemented superset of issued (for revocation) */ int mds_wanted; - u32 seq, issue_seq, mseq, gen; + u32 seq, issue_seq, mseq; + u32 cap_gen; /* active/stale cycle */ + u32 recon_gen; /* mds restart reconnect cycle */ unsigned long last_used; struct list_head caps_item; }; -- cgit v1.2.3 From eed0ef2caf928327332da54d23579debe629d5bc Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 10 Nov 2009 14:34:36 -0800 Subject: ceph: separate banner and connect during handshake into distinct stages We need to make sure we only swab the address during the banner once. So break process_banner out of process_connect, and clean up the surrounding code so that these are distinct phases of the handshake. Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 117 +++++++++++++++++++++++++++++++++------------------- fs/ceph/messenger.h | 4 +- 2 files changed, 77 insertions(+), 44 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 5cc374850d8f..e389656b2a6b 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -564,10 +564,26 @@ static void prepare_write_keepalive(struct ceph_connection *con) /* * We connected to a peer and are saying hello. */ -static void prepare_write_connect(struct ceph_messenger *msgr, - struct ceph_connection *con) +static void prepare_write_banner(struct ceph_messenger *msgr, + struct ceph_connection *con) { int len = strlen(CEPH_BANNER); + + con->out_kvec[0].iov_base = CEPH_BANNER; + con->out_kvec[0].iov_len = len; + con->out_kvec[1].iov_base = &msgr->my_enc_addr; + con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr); + con->out_kvec_left = 2; + con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr); + con->out_kvec_cur = con->out_kvec; + con->out_more = 0; + set_bit(WRITE_PENDING, &con->state); +} + +static void prepare_write_connect(struct ceph_messenger *msgr, + struct ceph_connection *con, + int after_banner) +{ unsigned global_seq = get_global_seq(con->msgr, 0); int proto; @@ -595,32 +611,14 @@ static void prepare_write_connect(struct ceph_messenger *msgr, if (test_bit(LOSSYTX, &con->state)) con->out_connect.flags = CEPH_MSG_CONNECT_LOSSY; - con->out_kvec[0].iov_base = CEPH_BANNER; - con->out_kvec[0].iov_len = len; - con->out_kvec[1].iov_base = &msgr->my_enc_addr; - con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr); - con->out_kvec[2].iov_base = &con->out_connect; - con->out_kvec[2].iov_len = sizeof(con->out_connect); - con->out_kvec_left = 3; - con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr) + - sizeof(con->out_connect); - con->out_kvec_cur = con->out_kvec; - con->out_more = 0; - set_bit(WRITE_PENDING, &con->state); -} - -static void prepare_write_connect_retry(struct ceph_messenger *msgr, - struct ceph_connection *con) -{ - dout("prepare_write_connect_retry %p\n", con); - con->out_connect.connect_seq = cpu_to_le32(con->connect_seq); - con->out_connect.global_seq = - cpu_to_le32(get_global_seq(con->msgr, 0)); - - con->out_kvec[0].iov_base = &con->out_connect; - con->out_kvec[0].iov_len = sizeof(con->out_connect); - con->out_kvec_left = 1; - con->out_kvec_bytes = sizeof(con->out_connect); + if (!after_banner) { + con->out_kvec_left = 0; + con->out_kvec_bytes = 0; + } + con->out_kvec[con->out_kvec_left].iov_base = &con->out_connect; + con->out_kvec[con->out_kvec_left].iov_len = sizeof(con->out_connect); + con->out_kvec_left++; + con->out_kvec_bytes += sizeof(con->out_connect); con->out_kvec_cur = con->out_kvec; con->out_more = 0; set_bit(WRITE_PENDING, &con->state); @@ -778,6 +776,12 
@@ out: /* * Prepare to read connection handshake, or an ack. */ +static void prepare_read_banner(struct ceph_connection *con) +{ + dout("prepare_read_banner %p\n", con); + con->in_base_pos = 0; +} + static void prepare_read_connect(struct ceph_connection *con) { dout("prepare_read_connect %p\n", con); @@ -829,11 +833,11 @@ static int read_partial(struct ceph_connection *con, /* * Read all or part of the connect-side handshake on a new connection */ -static int read_partial_connect(struct ceph_connection *con) +static int read_partial_banner(struct ceph_connection *con) { int ret, to = 0; - dout("read_partial_connect %p at %d\n", con, con->in_base_pos); + dout("read_partial_banner %p at %d\n", con, con->in_base_pos); /* peer's banner */ ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner); @@ -847,6 +851,16 @@ static int read_partial_connect(struct ceph_connection *con) &con->peer_addr_for_me); if (ret <= 0) goto out; +out: + return ret; +} + +static int read_partial_connect(struct ceph_connection *con) +{ + int ret, to = 0; + + dout("read_partial_connect %p at %d\n", con, con->in_base_pos); + ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply); if (ret <= 0) goto out; @@ -856,6 +870,7 @@ static int read_partial_connect(struct ceph_connection *con) le32_to_cpu(con->in_reply.global_seq)); out: return ret; + } /* @@ -976,9 +991,9 @@ bad: return -EINVAL; } -static int process_connect(struct ceph_connection *con) +static int process_banner(struct ceph_connection *con) { - dout("process_connect on %p tag %d\n", con, (int)con->in_tag); + dout("process_banner on %p\n", con); if (verify_hello(con) < 0) return -1; @@ -1016,10 +1031,19 @@ static int process_connect(struct ceph_connection *con) sizeof(con->peer_addr_for_me.in_addr)); addr_set_port(&con->msgr->inst.addr.in_addr, port); encode_my_addr(con->msgr); - dout("process_connect learned my addr is %s\n", + dout("process_banner learned my addr is %s\n", pr_addr(&con->msgr->inst.addr.in_addr)); } + set_bit(NEGOTIATING, &con->state); + prepare_read_connect(con); + return 0; +} + +static int process_connect(struct ceph_connection *con) +{ + dout("process_connect on %p tag %d\n", con, (int)con->in_tag); + switch (con->in_reply.tag) { case CEPH_MSGR_TAG_BADPROTOVER: dout("process_connect got BADPROTOVER my %d != their %d\n", @@ -1053,7 +1077,7 @@ static int process_connect(struct ceph_connection *con) ENTITY_NAME(con->peer_name), pr_addr(&con->peer_addr.in_addr)); reset_connection(con); - prepare_write_connect_retry(con->msgr, con); + prepare_write_connect(con->msgr, con, 0); prepare_read_connect(con); /* Tell ceph about it. */ @@ -1071,7 +1095,7 @@ static int process_connect(struct ceph_connection *con) le32_to_cpu(con->out_connect.connect_seq), le32_to_cpu(con->in_connect.connect_seq)); con->connect_seq = le32_to_cpu(con->in_connect.connect_seq); - prepare_write_connect_retry(con->msgr, con); + prepare_write_connect(con->msgr, con, 0); prepare_read_connect(con); break; @@ -1080,19 +1104,17 @@ static int process_connect(struct ceph_connection *con) * If we sent a smaller global_seq than the peer has, try * again with a larger value. 
*/ - dout("process_connect got RETRY_GLOBAL my %u, peer_gseq = %u\n", + dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n", con->peer_global_seq, le32_to_cpu(con->in_connect.global_seq)); get_global_seq(con->msgr, le32_to_cpu(con->in_connect.global_seq)); - prepare_write_connect_retry(con->msgr, con); + prepare_write_connect(con->msgr, con, 0); prepare_read_connect(con); break; case CEPH_MSGR_TAG_READY: clear_bit(CONNECTING, &con->state); - if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY) - set_bit(LOSSYRX, &con->state); con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); con->connect_seq++; dout("process_connect got READY gseq %d cseq %d (%d)\n", @@ -1420,9 +1442,11 @@ more: if (test_and_clear_bit(STANDBY, &con->state)) con->connect_seq++; - prepare_write_connect(msgr, con); - prepare_read_connect(con); + prepare_write_banner(msgr, con); + prepare_write_connect(msgr, con, 1); + prepare_read_banner(con); set_bit(CONNECTING, &con->state); + clear_bit(NEGOTIATING, &con->state); con->in_tag = CEPH_MSGR_TAG_READY; dout("try_write initiating connect on %p new state %lu\n", @@ -1521,7 +1545,16 @@ more: dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag, con->in_base_pos); if (test_bit(CONNECTING, &con->state)) { - dout("try_read connecting\n"); + if (!test_bit(NEGOTIATING, &con->state)) { + dout("try_read connecting\n"); + ret = read_partial_banner(con); + if (ret <= 0) + goto done; + if (process_banner(con) < 0) { + ret = -1; + goto out; + } + } ret = read_partial_connect(con); if (ret <= 0) goto done; diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index e016fa7cf970..80f7e1e94448 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -104,8 +104,8 @@ struct ceph_msg_pos { * thread is currently opening, reading or writing data to the socket. */ #define LOSSYTX 0 /* we can close channel or drop messages on errors */ -#define LOSSYRX 1 /* peer may reset/drop messages */ -#define CONNECTING 2 +#define CONNECTING 1 +#define NEGOTIATING 2 #define KEEPALIVE_PENDING 3 #define WRITE_PENDING 4 /* we have data ready to send */ #define QUEUED 5 /* there is work queued on this connection */ -- cgit v1.2.3 From cdac830313fa6bf2831693af80fefe4aaac11b7d Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 10 Nov 2009 16:02:23 -0800 Subject: ceph: remove recon_gen logic We don't get an explicit affirmative confirmation that our caps reconnect, nor do we necessarily want to pay that cost. So, take all this code out for now. 
Signed-off-by: Sage Weil --- fs/ceph/caps.c | 12 +----------- fs/ceph/mds_client.c | 15 +-------------- fs/ceph/mds_client.h | 2 -- fs/ceph/super.h | 1 - 4 files changed, 2 insertions(+), 28 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 775e6f6fc970..d8132b6e770d 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -610,7 +610,6 @@ retry: cap->issue_seq = seq; cap->mseq = mseq; cap->cap_gen = session->s_cap_gen; - cap->recon_gen = session->s_recon_gen; if (fmode >= 0) __ceph_get_fmode(ci, fmode); @@ -627,21 +626,13 @@ retry: static int __cap_is_valid(struct ceph_cap *cap) { unsigned long ttl; - u32 gen, recon_gen; + u32 gen; spin_lock(&cap->session->s_cap_lock); gen = cap->session->s_cap_gen; - recon_gen = cap->session->s_recon_gen; ttl = cap->session->s_cap_ttl; spin_unlock(&cap->session->s_cap_lock); - if (cap->recon_gen != recon_gen) { - dout("__cap_is_valid %p cap %p issued %s " - "but DEAD (recon_gen %u vs %u)\n", &cap->ci->vfs_inode, - cap, ceph_cap_string(cap->issued), cap->recon_gen, - recon_gen); - return 0; - } if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) { dout("__cap_is_valid %p cap %p issued %s " "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode, @@ -2213,7 +2204,6 @@ restart: issued |= implemented | __ceph_caps_dirty(ci); cap->cap_gen = session->s_cap_gen; - cap->recon_gen = session->s_recon_gen; __check_cap_issue(ci, cap, newcaps); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 828417ae16f9..aad10d90feee 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -329,7 +329,6 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); spin_lock_init(&s->s_cap_lock); - s->s_recon_gen = 0; s->s_cap_gen = 0; s->s_cap_ttl = 0; s->s_renew_requested = 0; @@ -736,25 +735,14 @@ static void remove_session_caps(struct ceph_mds_session *session) static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap, void *arg) { - struct ceph_mds_session *session = arg; - - spin_lock(&inode->i_lock); - if (cap->recon_gen != session->s_recon_gen) { - pr_err("failed reconnect %p %llx.%llx cap %p " - "(recon_gen %d < session %d)\n", inode, - ceph_vinop(inode), cap, - cap->recon_gen, session->s_recon_gen); - __ceph_remove_cap(cap, NULL); - } wake_up(&ceph_inode(inode)->i_cap_wq); - spin_unlock(&inode->i_lock); return 0; } static void wake_up_session_caps(struct ceph_mds_session *session) { dout("wake_up_session_caps %p mds%d\n", session, session->s_mds); - iterate_session_caps(session, wake_up_session_cb, session); + iterate_session_caps(session, wake_up_session_cb, NULL); } /* @@ -2052,7 +2040,6 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds) session->s_state = CEPH_MDS_SESSION_RECONNECTING; session->s_seq = 0; - session->s_recon_gen++; ceph_con_open(&session->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index c0846b1c482b..f566e9c84295 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -98,8 +98,6 @@ struct ceph_mds_session { u64 s_seq; /* incoming msg seq # */ struct mutex s_mutex; /* serialize session messages */ - int s_recon_gen; /* inc on reconnect to recovered mds */ - struct ceph_connection s_con; /* protected by s_cap_lock */ diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 25793559a2e5..06b62c02f513 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -171,7 +171,6 @@ struct ceph_cap { int mds_wanted; u32 seq, issue_seq, mseq; u32 cap_gen; 
/* active/stale cycle */ - u32 recon_gen; /* mds restart reconnect cycle */ unsigned long last_used; struct list_head caps_item; }; -- cgit v1.2.3 From 09b8a7d2af83ae96dc052f9708e50140d06a9b6c Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 11 Nov 2009 15:21:27 -0800 Subject: ceph: exclude snapdir from readdir results It was hidden from sync readdir, but not the cached dcache version. Signed-off-by: Sage Weil --- fs/ceph/dir.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 4f7467961b09..32ef54367224 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -131,6 +131,7 @@ more: goto out_unlock; } if (!d_unhashed(dentry) && dentry->d_inode && + ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && filp->f_pos <= di->offset) break; dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry, -- cgit v1.2.3 From b377ff13b31778c19203f3089d14080beb40a692 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 11 Nov 2009 15:22:37 -0800 Subject: ceph: initialize i_size/i_rbytes on snapdir Signed-off-by: Sage Weil --- fs/ceph/inode.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 036873c42a78..074ee42bd344 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -62,6 +62,7 @@ struct inode *ceph_get_snapdir(struct inode *parent) .snap = CEPH_SNAPDIR, }; struct inode *inode = ceph_get_inode(parent->i_sb, vino); + struct ceph_inode_info *ci = ceph_inode(inode); BUG_ON(!S_ISDIR(parent->i_mode)); if (IS_ERR(inode)) @@ -71,7 +72,8 @@ struct inode *ceph_get_snapdir(struct inode *parent) inode->i_gid = parent->i_gid; inode->i_op = &ceph_dir_iops; inode->i_fop = &ceph_dir_fops; - ceph_inode(inode)->i_snap_caps = CEPH_CAP_PIN; /* so we can open */ + ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */ + ci->i_rbytes = 0; return inode; } -- cgit v1.2.3 From fef320ff8887c702cde7ca6b8dbfff3a341d49fe Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 11 Nov 2009 15:50:12 -0800 Subject: ceph: pr_info when mds reconnect completes This helps the user know what's going on during the (involved) reconnect process. They already see when the mds fails and reconnect starts. 
Signed-off-by: Sage Weil --- fs/ceph/mds_client.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index aad10d90feee..44cac576f15e 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2234,6 +2234,7 @@ static void check_new_map(struct ceph_mds_client *mdsc, */ if (oldstate < CEPH_MDS_STATE_ACTIVE && newstate >= CEPH_MDS_STATE_ACTIVE) { + pr_info("mds%d reconnect completed\n", s->s_mds); kick_requests(mdsc, i, 1); ceph_kick_flushing_caps(mdsc, s); } -- cgit v1.2.3 From 039934b895c89c2bb40aa5132efe00e60b70efca Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 12 Nov 2009 15:05:52 -0800 Subject: ceph: build cleanly without CONFIG_DEBUG_FS Signed-off-by: Sage Weil --- fs/ceph/debugfs.c | 23 +++++++++++++++++++++++ fs/ceph/mds_client.h | 2 ++ fs/ceph/mon_client.h | 2 ++ fs/ceph/osd_client.h | 2 ++ fs/ceph/super.h | 2 ++ 5 files changed, 31 insertions(+) diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index 9edbad32f116..9b2020696680 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -8,6 +8,8 @@ #include "super.h" #include "mds_client.h" +#ifdef CONFIG_DEBUG_FS + /* * Implement /sys/kernel/debug/ceph fun * @@ -423,3 +425,24 @@ void ceph_debugfs_client_cleanup(struct ceph_client *client) debugfs_remove(client->debugfs_dir); } +#else // CONFIG_DEBUG_FS + +int __init ceph_debugfs_init(void) +{ + return 0; +} + +void ceph_debugfs_cleanup(void) +{ +} + +int ceph_debugfs_client_init(struct ceph_client *client) +{ + return 0; +} + +void ceph_debugfs_client_cleanup(struct ceph_client *client) +{ +} + +#endif // CONFIG_DEBUG_FS diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index f566e9c84295..0751b821f231 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -256,7 +256,9 @@ struct ceph_mds_client { spinlock_t cap_dirty_lock; /* protects above items */ wait_queue_head_t cap_flushing_wq; +#ifdef CONFIG_DEBUG_FS struct dentry *debugfs_file; +#endif spinlock_t dentry_lru_lock; struct list_head dentry_lru; diff --git a/fs/ceph/mon_client.h b/fs/ceph/mon_client.h index 5258c5693b03..9f6db45bf469 100644 --- a/fs/ceph/mon_client.h +++ b/fs/ceph/mon_client.h @@ -78,7 +78,9 @@ struct ceph_mon_client { int want_next_osdmap; /* 1 = want, 2 = want+asked */ u32 have_osdmap, have_mdsmap; +#ifdef CONFIG_DEBUG_FS struct dentry *debugfs_file; +#endif }; extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end); diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h index 9a4addf7d651..766c8dc80aff 100644 --- a/fs/ceph/osd_client.h +++ b/fs/ceph/osd_client.h @@ -83,7 +83,9 @@ struct ceph_osd_client { struct rb_root requests; /* pending requests */ int num_requests; struct delayed_work timeout_work; +#ifdef CONFIG_DEBUG_FS struct dentry *debugfs_file; +#endif mempool_t *req_mempool; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 06b62c02f513..8aa1ffba6c0d 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -112,9 +112,11 @@ static inline unsigned long time_sub(unsigned long a, unsigned long b) */ struct ceph_client { __s64 whoami; /* my client number */ +#ifdef CONFIG_DEBUG_FS struct dentry *debugfs_monmap; struct dentry *debugfs_mdsmap, *debugfs_osdmap; struct dentry *debugfs_dir, *debugfs_dentry_lru, *debugfs_caps; +#endif struct mutex mount_mutex; /* serialize mount attempts */ struct ceph_mount_args *mount_args; -- cgit v1.2.3 From 11ea8eda064aa4dc6e44a6dade1891b69ebd5255 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 12 Nov 2009 15:07:24 -0800 Subject: ceph: fix page invalidation deadlock We occasionally 
want to make a best-effort attempt to invalidate cache pages without fear of blocking. If this fails, we fall back to an async invalidate in another thread. Use invalidate_mapping_pages instead of invalidate_inode_page2, as that will skip locked pages, and not deadlock. Signed-off-by: Sage Weil --- fs/ceph/caps.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index d8132b6e770d..9dd110602cda 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1440,7 +1440,7 @@ retry_locked: dout("check_caps trying to invalidate on %p\n", inode); spin_unlock(&inode->i_lock); - ret = invalidate_inode_pages2(&inode->i_data); + ret = invalidate_mapping_pages(&inode->i_data, 0, -1); spin_lock(&inode->i_lock); if (ret == 0 && invalidating_gen == ci->i_rdcache_gen) { /* success. */ @@ -2180,7 +2180,7 @@ restart: spin_unlock(&inode->i_lock); tried_invalidate = 1; - ret = invalidate_inode_pages2(&inode->i_data); + ret = invalidate_mapping_pages(&inode->i_data, 0, -1); spin_lock(&inode->i_lock); if (ret < 0) { /* there were locked pages.. invalidate later -- cgit v1.2.3 From 42ce56e50d948fc7c1c8c3a749ee5a21a7e134f6 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 18 Nov 2009 11:22:36 -0800 Subject: ceph: remove bad calls to ceph_con_shutdown We want to ceph_con_close when we're done with the connection, before the ref count reaches 0. Once it does, do not call ceph_con_shutdown, as that takes the con mutex and may sleep, and besides that is unnecessary. Signed-off-by: Sage Weil --- fs/ceph/mds_client.c | 30 ++++++++++++++++++------------ fs/ceph/messenger.h | 1 - fs/ceph/osd_client.c | 4 +--- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 44cac576f15e..fdecf9984180 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -274,10 +274,8 @@ void ceph_put_mds_session(struct ceph_mds_session *s) { dout("mdsc put_session %p %d -> %d\n", s, atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); - if (atomic_dec_and_test(&s->s_ref)) { - ceph_con_shutdown(&s->s_con); + if (atomic_dec_and_test(&s->s_ref)) kfree(s); - } } /* @@ -326,7 +324,6 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, s->s_con.ops = &mds_con_ops; s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS; s->s_con.peer_name.num = cpu_to_le64(mds); - ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); spin_lock_init(&s->s_cap_lock); s->s_cap_gen = 0; @@ -352,7 +349,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, dout("register_session realloc to %d\n", newmax); sa = kcalloc(newmax, sizeof(void *), GFP_NOFS); if (sa == NULL) - return ERR_PTR(-ENOMEM); + goto fail_realloc; if (mdsc->sessions) { memcpy(sa, mdsc->sessions, mdsc->max_sessions * sizeof(void *)); @@ -363,17 +360,26 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, } mdsc->sessions[mds] = s; atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */ + + ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); + return s; + +fail_realloc: + kfree(s); + return ERR_PTR(-ENOMEM); } /* * called under mdsc->mutex */ -static void unregister_session(struct ceph_mds_client *mdsc, int mds) +static void unregister_session(struct ceph_mds_client *mdsc, + struct ceph_mds_session *s) { - dout("unregister_session mds%d %p\n", mds, mdsc->sessions[mds]); - ceph_put_mds_session(mdsc->sessions[mds]); - mdsc->sessions[mds] = NULL; + dout("unregister_session mds%d %p\n", 
s->s_mds, s); + mdsc->sessions[s->s_mds] = NULL; + ceph_con_close(&s->s_con); + ceph_put_mds_session(s); } /* @@ -1870,7 +1876,7 @@ static void handle_session(struct ceph_mds_session *session, break; case CEPH_SESSION_CLOSE: - unregister_session(mdsc, mds); + unregister_session(mdsc, session); remove_session_caps(session); wake = 1; /* for good measure */ complete(&mdsc->session_close_waiters); @@ -2199,7 +2205,7 @@ static void check_new_map(struct ceph_mds_client *mdsc, /* the session never opened, just close it * out now */ __wake_requests(mdsc, &s->s_waiting); - unregister_session(mdsc, i); + unregister_session(mdsc, s); } else { /* just close it */ mutex_unlock(&mdsc->mutex); @@ -2724,7 +2730,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) for (i = 0; i < mdsc->max_sessions; i++) { if (mdsc->sessions[i]) { session = get_session(mdsc->sessions[i]); - unregister_session(mdsc, i); + unregister_session(mdsc, session); mutex_unlock(&mdsc->mutex); mutex_lock(&session->s_mutex); remove_session_caps(session); diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index 80f7e1e94448..4bd85c36308e 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -212,7 +212,6 @@ extern void ceph_messenger_destroy(struct ceph_messenger *); extern void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con); -extern void ceph_con_shutdown(struct ceph_connection *con); extern void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr); extern void ceph_con_close(struct ceph_connection *con); diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 7db14ba6261c..bcb9fe693076 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -350,10 +350,8 @@ static void put_osd(struct ceph_osd *osd) { dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), atomic_read(&osd->o_ref) - 1); - if (atomic_dec_and_test(&osd->o_ref)) { - ceph_con_shutdown(&osd->o_con); + if (atomic_dec_and_test(&osd->o_ref)) kfree(osd); - } } /* -- cgit v1.2.3 From 71ececdacae24be333c534869cb1b06357f0e215 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 18 Nov 2009 11:27:06 -0800 Subject: ceph: remove unnecessary ceph_con_shutdown We require that ceph_con_close be called before we drop the connection, so this is unneeded. Just BUG if con->sock != NULL. Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index e389656b2a6b..d8a6a56a1571 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -339,17 +339,6 @@ void ceph_con_close(struct ceph_connection *con) queue_con(con); } -/* - * clean up connection state - */ -void ceph_con_shutdown(struct ceph_connection *con) -{ - dout("con_shutdown %p\n", con); - reset_connection(con); - set_bit(DEAD, &con->state); - con_close_socket(con); /* silently ignore errors */ -} - /* * Reopen a closed connection, with a new peer address. */ @@ -380,7 +369,7 @@ void ceph_con_put(struct ceph_connection *con) atomic_read(&con->nref), atomic_read(&con->nref) - 1); BUG_ON(atomic_read(&con->nref) == 0); if (atomic_dec_and_test(&con->nref)) { - ceph_con_shutdown(con); + BUG_ON(con->sock); kfree(con); } } -- cgit v1.2.3 From 5f44f142601bf94c448e2d463f0f18fd159da164 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 18 Nov 2009 14:52:18 -0800 Subject: ceph: handle errors during osd client init Unwind initializing if we get ENOMEM during client initialization. 
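The fix uses the usual kernel goto-unwind pattern: each allocation that
can fail jumps to a label that releases only what was set up before it,
so nothing leaks on a partial failure.  A minimal sketch of the shape
(the struct and member names here are hypothetical, not the real ceph
ones):

    struct foo_client {                 /* hypothetical */
            void *pool;
            void *msgs;
    };

    static int foo_client_init(struct foo_client *c)
    {
            int err = -ENOMEM;

            c->pool = kzalloc(128, GFP_NOFS);
            if (!c->pool)
                    goto out;
            c->msgs = kzalloc(128, GFP_NOFS);
            if (!c->msgs)
                    goto out_pool;
            return 0;                   /* fully initialized */

    out_pool:
            kfree(c->pool);             /* undo only what succeeded */
    out:
            return err;
    }

ceph_osdc_init() and ceph_create_client() below follow the same shape for
the request mempool, the msgpools, and the monc/osdc/mdsc setup.
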
Signed-off-by: Sage Weil --- fs/ceph/mds_client.c | 3 ++- fs/ceph/mds_client.h | 2 +- fs/ceph/osd_client.c | 15 +++++++++++---- fs/ceph/super.c | 6 +++++- 4 files changed, 19 insertions(+), 7 deletions(-) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index fdecf9984180..69feeb1c9819 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2552,7 +2552,7 @@ static void delayed_work(struct work_struct *work) } -void ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client) +int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client) { mdsc->client = client; mutex_init(&mdsc->mutex); @@ -2582,6 +2582,7 @@ void ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client) init_waitqueue_head(&mdsc->cap_flushing_wq); spin_lock_init(&mdsc->dentry_lru_lock); INIT_LIST_HEAD(&mdsc->dentry_lru); + return 0; } /* diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 0751b821f231..7c439488cfab 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -282,7 +282,7 @@ extern void ceph_put_mds_session(struct ceph_mds_session *s); extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc, struct ceph_msg *msg, int mds); -extern void ceph_mdsc_init(struct ceph_mds_client *mdsc, +extern int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client); extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc); extern void ceph_mdsc_stop(struct ceph_mds_client *mdsc); diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index bcb9fe693076..0a16c4f951f9 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -1127,19 +1127,26 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) osdc->num_requests = 0; INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout); + err = -ENOMEM; osdc->req_mempool = mempool_create_kmalloc_pool(10, sizeof(struct ceph_osd_request)); if (!osdc->req_mempool) - return -ENOMEM; + goto out; err = ceph_msgpool_init(&osdc->msgpool_op, 4096, 10, true); if (err < 0) - return -ENOMEM; + goto out_mempool; err = ceph_msgpool_init(&osdc->msgpool_op_reply, 512, 0, false); if (err < 0) - return -ENOMEM; - + goto out_msgpool; return 0; + +out_msgpool: + ceph_msgpool_destroy(&osdc->msgpool_op); +out_mempool: + mempool_destroy(osdc->req_mempool); +out: + return err; } void ceph_osdc_stop(struct ceph_osd_client *osdc) diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 1ac7b07214f3..fe0a5962a082 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -530,9 +530,13 @@ static struct ceph_client *ceph_create_client(struct ceph_mount_args *args) err = ceph_osdc_init(&client->osdc, client); if (err < 0) goto fail_monc; - ceph_mdsc_init(&client->mdsc, client); + err = ceph_mdsc_init(&client->mdsc, client); + if (err < 0) + goto fail_osdc; return client; +fail_osdc: + ceph_osdc_stop(&client->osdc); fail_monc: ceph_monc_stop(&client->monc); fail_trunc_wq: -- cgit v1.2.3 From 4e7a5dcd1bbab6560fbc8ada29a840e7a20ed7bc Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 18 Nov 2009 16:19:57 -0800 Subject: ceph: negotiate authentication protocol; implement AUTH_NONE protocol When we open a monitor session, we send an initial AUTH message listing the auth protocols we support, our entity name, and (possibly) a previously assigned global_id. The monitor chooses a protocol and responds with an initial message. Initially implement AUTH_NONE, a dummy protocol that provides no security, but works within the new framework. 
It generates 'authorizers' that are used when connecting to (mds, osd) services that simply state our entity name and global_id. This is a wire protocol change. Signed-off-by: Sage Weil --- fs/ceph/Makefile | 1 + fs/ceph/auth.c | 220 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/auth.h | 77 +++++++++++++++++ fs/ceph/auth_none.c | 120 +++++++++++++++++++++++++++ fs/ceph/auth_none.h | 28 +++++++ fs/ceph/ceph_fs.h | 14 +++- fs/ceph/ceph_strings.c | 13 +++ fs/ceph/decode.h | 6 ++ fs/ceph/mds_client.c | 69 +++++++++++++++- fs/ceph/mds_client.h | 4 + fs/ceph/messenger.c | 54 +++++++++++- fs/ceph/messenger.h | 10 +++ fs/ceph/mon_client.c | 210 +++++++++++++++++++++++++++++++++++++--------- fs/ceph/mon_client.h | 14 ++-- fs/ceph/msgr.h | 21 +++-- fs/ceph/osd_client.c | 63 +++++++++++++- fs/ceph/osd_client.h | 4 + fs/ceph/rados.h | 2 +- fs/ceph/super.c | 32 ++++--- fs/ceph/super.h | 2 + 20 files changed, 888 insertions(+), 76 deletions(-) create mode 100644 fs/ceph/auth.c create mode 100644 fs/ceph/auth.h create mode 100644 fs/ceph/auth_none.c create mode 100644 fs/ceph/auth_none.h diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile index bdd3e6fc1609..827629c85768 100644 --- a/fs/ceph/Makefile +++ b/fs/ceph/Makefile @@ -13,6 +13,7 @@ ceph-objs := super.o inode.o dir.o file.o addr.o ioctl.o \ mon_client.o \ osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \ debugfs.o \ + auth.o auth_none.o \ ceph_fs.o ceph_strings.o ceph_hash.o ceph_frag.o else diff --git a/fs/ceph/auth.c b/fs/ceph/auth.c new file mode 100644 index 000000000000..c4d1eee827a3 --- /dev/null +++ b/fs/ceph/auth.c @@ -0,0 +1,220 @@ +#include "ceph_debug.h" + +#include +#include + +#include "types.h" +#include "auth_none.h" +#include "decode.h" +#include "super.h" + +#include "messenger.h" + +/* + * get protocol handler + */ +static u32 supported_protocols[] = { + CEPH_AUTH_NONE +}; + +int ceph_auth_init_protocol(struct ceph_auth_client *ac, int protocol) +{ + switch (protocol) { + case CEPH_AUTH_NONE: + return ceph_auth_none_init(ac); + default: + return -ENOENT; + } +} + +/* + * setup, teardown. + */ +struct ceph_auth_client *ceph_auth_init(const char *name, const char *secret) +{ + struct ceph_auth_client *ac; + int ret; + + dout("auth_init name '%s' secret '%s'\n", name, secret); + + ret = -ENOMEM; + ac = kzalloc(sizeof(*ac), GFP_NOFS); + if (!ac) + goto out; + + ac->negotiating = true; + if (name) + ac->name = name; + else + ac->name = CEPH_AUTH_NAME_DEFAULT; + dout("auth_init name %s secret %s\n", ac->name, secret); + ac->secret = secret; + return ac; + +out: + return ERR_PTR(ret); +} + +void ceph_auth_destroy(struct ceph_auth_client *ac) +{ + dout("auth_destroy %p\n", ac); + if (ac->ops) + ac->ops->destroy(ac); + kfree(ac); +} + +/* + * Reset occurs when reconnecting to the monitor. + */ +void ceph_auth_reset(struct ceph_auth_client *ac) +{ + dout("auth_reset %p\n", ac); + if (ac->ops && !ac->negotiating) + ac->ops->reset(ac); + ac->negotiating = true; +} + +int ceph_entity_name_encode(const char *name, void **p, void *end) +{ + int len = strlen(name); + + if (*p + 2*sizeof(u32) + len > end) + return -ERANGE; + ceph_encode_32(p, CEPH_ENTITY_TYPE_CLIENT); + ceph_encode_32(p, len); + ceph_encode_copy(p, name, len); + return 0; +} + +/* + * Initiate protocol negotiation with monitor. Include entity name + * and list supported protocols. 
+ */ +int ceph_auth_build_hello(struct ceph_auth_client *ac, void *buf, size_t len) +{ + struct ceph_mon_request_header *monhdr = buf; + void *p = monhdr + 1, *end = buf + len, *lenp; + int i, num; + int ret; + + dout("auth_build_hello\n"); + monhdr->have_version = 0; + monhdr->session_mon = cpu_to_le16(-1); + monhdr->session_mon_tid = 0; + + ceph_encode_32(&p, 0); /* no protocol, yet */ + + lenp = p; + p += sizeof(u32); + + num = ARRAY_SIZE(supported_protocols); + ceph_encode_32(&p, num); + for (i = 0; i < num; i++) + ceph_encode_32(&p, supported_protocols[i]); + + ret = ceph_entity_name_encode(ac->name, &p, end); + if (ret < 0) + return ret; + ceph_decode_need(&p, end, sizeof(u64), bad); + ceph_encode_64(&p, ac->global_id); + + ceph_encode_32(&lenp, p - lenp - sizeof(u32)); + return p - buf; + +bad: + return -ERANGE; +} + +/* + * Handle auth message from monitor. + */ +int ceph_handle_auth_reply(struct ceph_auth_client *ac, + void *buf, size_t len, + void *reply_buf, size_t reply_len) +{ + void *p = buf; + void *end = buf + len; + int protocol; + s32 result; + u64 global_id; + void *payload, *payload_end; + int payload_len; + char *result_msg; + int result_msg_len; + int ret = -EINVAL; + + dout("handle_auth_reply %p %p\n", p, end); + ceph_decode_need(&p, end, sizeof(u32) * 3 + sizeof(u64), bad); + protocol = ceph_decode_32(&p); + result = ceph_decode_32(&p); + global_id = ceph_decode_64(&p); + payload_len = ceph_decode_32(&p); + payload = p; + p += payload_len; + ceph_decode_need(&p, end, sizeof(u32), bad); + result_msg_len = ceph_decode_32(&p); + result_msg = p; + p += result_msg_len; + if (p != end) + goto bad; + + dout(" result %d '%.*s' gid %llu len %d\n", result, result_msg_len, + result_msg, global_id, payload_len); + + payload_end = payload + payload_len; + + if (global_id && ac->global_id != global_id) { + dout(" set global_id %lld -> %lld\n", ac->global_id, global_id); + ac->global_id = global_id; + } + + if (ac->negotiating) { + /* set up (new) protocol handler? */ + if (ac->protocol && ac->protocol != protocol) { + ac->ops->destroy(ac); + ac->protocol = 0; + ac->ops = NULL; + } + if (ac->protocol != protocol) { + ret = ceph_auth_init_protocol(ac, protocol); + if (ret) { + pr_err("error %d on auth protocol %d init\n", + ret, protocol); + goto out; + } + } + } + + ret = ac->ops->handle_reply(ac, result, payload, payload_end); + if (ret == -EAGAIN) { + struct ceph_mon_request_header *monhdr = reply_buf; + void *p = reply_buf + 1; + void *end = reply_buf + reply_len; + + monhdr->have_version = 0; + monhdr->session_mon = cpu_to_le16(-1); + monhdr->session_mon_tid = 0; + + ceph_encode_32(&p, ac->protocol); + + ret = ac->ops->build_request(ac, p + sizeof(u32), end); + if (ret < 0) { + pr_err("error %d building request\n", ret); + goto out; + } + dout(" built request %d bytes\n", ret); + ceph_encode_32(&p, ret); + return p + ret - reply_buf; + } else if (ret) { + pr_err("authentication error %d\n", ret); + return ret; + } + return 0; + +bad: + pr_err("failed to decode auth msg\n"); +out: + return ret; +} + + diff --git a/fs/ceph/auth.h b/fs/ceph/auth.h new file mode 100644 index 000000000000..4d8cdf6bb3b6 --- /dev/null +++ b/fs/ceph/auth.h @@ -0,0 +1,77 @@ +#ifndef _FS_CEPH_AUTH_H +#define _FS_CEPH_AUTH_H + +#include "types.h" +#include "buffer.h" + +/* + * Abstract interface for communicating with the authenticate module. + * There is some handshake that takes place between us and the monitor + * to acquire the necessary keys. 
These are used to generate an + * 'authorizer' that we use when connecting to a service (mds, osd). + */ + +struct ceph_auth_client; +struct ceph_authorizer; + +struct ceph_auth_client_ops { + /* + * true if we are authenticated and can connect to + * services. + */ + int (*is_authenticated)(struct ceph_auth_client *ac); + + /* + * build requests and process replies during monitor + * handshake. if handle_reply returns -EAGAIN, we build + * another request. + */ + int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end); + int (*handle_reply)(struct ceph_auth_client *ac, int result, + void *buf, void *end); + + /* + * Create authorizer for connecting to a service, and verify + * the response to authenticate the service. + */ + int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type, + struct ceph_authorizer **a, + void **buf, size_t *len, + void **reply_buf, size_t *reply_len); + int (*verify_authorizer_reply)(struct ceph_auth_client *ac, + struct ceph_authorizer *a, size_t len); + void (*destroy_authorizer)(struct ceph_auth_client *ac, + struct ceph_authorizer *a); + + /* reset when we (re)connect to a monitor */ + void (*reset)(struct ceph_auth_client *ac); + + void (*destroy)(struct ceph_auth_client *ac); +}; + +struct ceph_auth_client { + u32 protocol; /* CEPH_AUTH_* */ + void *private; /* for use by protocol implementation */ + const struct ceph_auth_client_ops *ops; /* null iff protocol==0 */ + + bool negotiating; /* true if negotiating protocol */ + const char *name; /* entity name */ + u64 global_id; /* our unique id in system */ + const char *secret; /* our secret key */ + unsigned want_keys; /* which services we want */ +}; + +extern struct ceph_auth_client *ceph_auth_init(const char *name, + const char *secret); +extern void ceph_auth_destroy(struct ceph_auth_client *ac); + +extern void ceph_auth_reset(struct ceph_auth_client *ac); + +extern int ceph_auth_build_hello(struct ceph_auth_client *ac, + void *buf, size_t len); +extern int ceph_handle_auth_reply(struct ceph_auth_client *ac, + void *buf, size_t len, + void *reply_buf, size_t reply_len); +extern int ceph_entity_name_encode(const char *name, void **p, void *end); + +#endif diff --git a/fs/ceph/auth_none.c b/fs/ceph/auth_none.c new file mode 100644 index 000000000000..631017eb7117 --- /dev/null +++ b/fs/ceph/auth_none.c @@ -0,0 +1,120 @@ + +#include "ceph_debug.h" + +#include +#include +#include + +#include "auth_none.h" +#include "auth.h" +#include "decode.h" + +static void reset(struct ceph_auth_client *ac) +{ + struct ceph_auth_none_info *xi = ac->private; + + xi->starting = true; + xi->built_authorizer = false; +} + +static void destroy(struct ceph_auth_client *ac) +{ + kfree(ac->private); + ac->private = NULL; +} + +static int is_authenticated(struct ceph_auth_client *ac) +{ + struct ceph_auth_none_info *xi = ac->private; + + return !xi->starting; +} + +/* + * the generic auth code decode the global_id, and we carry no actual + * authenticate state, so nothing happens here. + */ +static int handle_reply(struct ceph_auth_client *ac, int result, + void *buf, void *end) +{ + struct ceph_auth_none_info *xi = ac->private; + + xi->starting = false; + return result; +} + +/* + * build an 'authorizer' with our entity_name and global_id. we can + * reuse a single static copy since it is identical for all services + * we connect to. 
+ */ +static int ceph_auth_none_create_authorizer( + struct ceph_auth_client *ac, int peer_type, + struct ceph_authorizer **a, + void **buf, size_t *len, + void **reply_buf, size_t *reply_len) +{ + struct ceph_auth_none_info *ai = ac->private; + struct ceph_none_authorizer *au = &ai->au; + void *p, *end; + int ret; + + if (!ai->built_authorizer) { + p = au->buf; + end = p + sizeof(au->buf); + ret = ceph_entity_name_encode(ac->name, &p, end - 8); + if (ret < 0) + goto bad; + ceph_decode_need(&p, end, sizeof(u64), bad2); + ceph_encode_64(&p, ac->global_id); + au->buf_len = p - (void *)au->buf; + ai->built_authorizer = true; + dout("built authorizer len %d\n", au->buf_len); + } + + *a = (struct ceph_authorizer *)au; + *buf = au->buf; + *len = au->buf_len; + *reply_buf = au->reply_buf; + *reply_len = sizeof(au->reply_buf); + return 0; + +bad2: + ret = -ERANGE; +bad: + return ret; +} + +static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac, + struct ceph_authorizer *a) +{ + /* nothing to do */ +} + +static const struct ceph_auth_client_ops ceph_auth_none_ops = { + .reset = reset, + .destroy = destroy, + .is_authenticated = is_authenticated, + .handle_reply = handle_reply, + .create_authorizer = ceph_auth_none_create_authorizer, + .destroy_authorizer = ceph_auth_none_destroy_authorizer, +}; + +int ceph_auth_none_init(struct ceph_auth_client *ac) +{ + struct ceph_auth_none_info *xi; + + dout("ceph_auth_none_init %p\n", ac); + xi = kzalloc(sizeof(*xi), GFP_NOFS); + if (!xi) + return -ENOMEM; + + xi->starting = true; + xi->built_authorizer = false; + + ac->protocol = CEPH_AUTH_NONE; + ac->private = xi; + ac->ops = &ceph_auth_none_ops; + return 0; +} + diff --git a/fs/ceph/auth_none.h b/fs/ceph/auth_none.h new file mode 100644 index 000000000000..56c05533a31c --- /dev/null +++ b/fs/ceph/auth_none.h @@ -0,0 +1,28 @@ +#ifndef _FS_CEPH_AUTH_NONE_H +#define _FS_CEPH_AUTH_NONE_H + +#include "auth.h" + +/* + * null security mode. + * + * we use a single static authorizer that simply encodes our entity name + * and global id. 
+ */ + +struct ceph_none_authorizer { + char buf[128]; + int buf_len; + char reply_buf[0]; +}; + +struct ceph_auth_none_info { + bool starting; + bool built_authorizer; + struct ceph_none_authorizer au; /* we only need one; it's static */ +}; + +extern int ceph_auth_none_init(struct ceph_auth_client *ac); + +#endif + diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index 36becb024788..1e96a9a87d8d 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -75,6 +75,16 @@ struct ceph_file_layout { int ceph_file_layout_is_valid(const struct ceph_file_layout *layout); +/* crypto algorithms */ +#define CEPH_CRYPTO_NONE 0x0 +#define CEPH_CRYPTO_AES 0x1 + +/* security/authentication protocols */ +#define CEPH_AUTH_UNKNOWN 0x0 +#define CEPH_AUTH_NONE 0x1 +#define CEPH_AUTH_CEPHX 0x2 + + /********************************************* * message layer */ @@ -90,12 +100,12 @@ int ceph_file_layout_is_valid(const struct ceph_file_layout *layout); /* client <-> monitor */ #define CEPH_MSG_MON_MAP 4 #define CEPH_MSG_MON_GET_MAP 5 -#define CEPH_MSG_CLIENT_MOUNT 10 -#define CEPH_MSG_CLIENT_MOUNT_ACK 11 #define CEPH_MSG_STATFS 13 #define CEPH_MSG_STATFS_REPLY 14 #define CEPH_MSG_MON_SUBSCRIBE 15 #define CEPH_MSG_MON_SUBSCRIBE_ACK 16 +#define CEPH_MSG_AUTH 17 +#define CEPH_MSG_AUTH_REPLY 18 /* client <-> mds */ #define CEPH_MSG_MDS_MAP 21 diff --git a/fs/ceph/ceph_strings.c b/fs/ceph/ceph_strings.c index 90d19d9d8d8f..8e4be6a80c62 100644 --- a/fs/ceph/ceph_strings.c +++ b/fs/ceph/ceph_strings.c @@ -3,6 +3,19 @@ */ #include "types.h" +const char *ceph_entity_type_name(int type) +{ + switch (type) { + case CEPH_ENTITY_TYPE_MDS: return "mds"; + case CEPH_ENTITY_TYPE_OSD: return "osd"; + case CEPH_ENTITY_TYPE_MON: return "mon"; + case CEPH_ENTITY_TYPE_CLIENT: return "client"; + case CEPH_ENTITY_TYPE_ADMIN: return "admin"; + case CEPH_ENTITY_TYPE_AUTH: return "auth"; + default: return "unknown"; + } +} + const char *ceph_osd_op_name(int op) { switch (op) { diff --git a/fs/ceph/decode.h b/fs/ceph/decode.h index a382aecc55bb..10de84896244 100644 --- a/fs/ceph/decode.h +++ b/fs/ceph/decode.h @@ -98,6 +98,7 @@ static inline void ceph_encode_addr(struct ceph_entity_addr *a) static inline void ceph_decode_addr(struct ceph_entity_addr *a) { a->in_addr.ss_family = ntohs(a->in_addr.ss_family); + WARN_ON(a->in_addr.ss_family == 512); } /* @@ -123,6 +124,11 @@ static inline void ceph_encode_8(void **p, u8 v) *(u8 *)*p = v; (*p)++; } +static inline void ceph_encode_copy(void **p, const void *s, int len) +{ + memcpy(*p, s, len); + *p += len; +} /* * filepath, string encoders diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 69feeb1c9819..8a285158aecc 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -8,6 +8,7 @@ #include "super.h" #include "messenger.h" #include "decode.h" +#include "auth.h" /* * A cluster of MDS (metadata server) daemons is responsible for @@ -274,8 +275,12 @@ void ceph_put_mds_session(struct ceph_mds_session *s) { dout("mdsc put_session %p %d -> %d\n", s, atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); - if (atomic_dec_and_test(&s->s_ref)) + if (atomic_dec_and_test(&s->s_ref)) { + if (s->s_authorizer) + s->s_mdsc->client->monc.auth->ops->destroy_authorizer( + s->s_mdsc->client->monc.auth, s->s_authorizer); kfree(s); + } } /* @@ -2777,9 +2782,15 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); ceph_decode_copy(&p, &fsid, sizeof(fsid)); - if (ceph_fsid_compare(&fsid, 
&mdsc->client->monc.monmap->fsid)) { - pr_err("got mdsmap with wrong fsid\n"); - return; + if (mdsc->client->monc.have_fsid) { + if (ceph_fsid_compare(&fsid, + &mdsc->client->monc.monmap->fsid)) { + pr_err("got mdsmap with wrong fsid\n"); + return; + } + } else { + ceph_fsid_set(&mdsc->client->monc.monmap->fsid, &fsid); + mdsc->client->monc.have_fsid = true; } epoch = ceph_decode_32(&p); maplen = ceph_decode_32(&p); @@ -2895,10 +2906,60 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) ceph_msg_put(msg); } +/* + * authentication + */ +static int get_authorizer(struct ceph_connection *con, + void **buf, int *len, int *proto, + void **reply_buf, int *reply_len, int force_new) +{ + struct ceph_mds_session *s = con->private; + struct ceph_mds_client *mdsc = s->s_mdsc; + struct ceph_auth_client *ac = mdsc->client->monc.auth; + int ret = 0; + + if (force_new && s->s_authorizer) { + ac->ops->destroy_authorizer(ac, s->s_authorizer); + s->s_authorizer = NULL; + } + if (s->s_authorizer == NULL) { + if (ac->ops->create_authorizer) { + ret = ac->ops->create_authorizer( + ac, CEPH_ENTITY_TYPE_MDS, + &s->s_authorizer, + &s->s_authorizer_buf, + &s->s_authorizer_buf_len, + &s->s_authorizer_reply_buf, + &s->s_authorizer_reply_buf_len); + if (ret) + return ret; + } + } + + *proto = ac->protocol; + *buf = s->s_authorizer_buf; + *len = s->s_authorizer_buf_len; + *reply_buf = s->s_authorizer_reply_buf; + *reply_len = s->s_authorizer_reply_buf_len; + return 0; +} + + +static int verify_authorizer_reply(struct ceph_connection *con, int len) +{ + struct ceph_mds_session *s = con->private; + struct ceph_mds_client *mdsc = s->s_mdsc; + struct ceph_auth_client *ac = mdsc->client->monc.auth; + + return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len); +} + const static struct ceph_connection_operations mds_con_ops = { .get = con_get, .put = con_put, .dispatch = dispatch, + .get_authorizer = get_authorizer, + .verify_authorizer_reply = verify_authorizer_reply, .peer_reset = peer_reset, .alloc_msg = ceph_alloc_msg, .alloc_middle = ceph_alloc_middle, diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 7c439488cfab..9faa1b2f79a7 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -100,6 +100,10 @@ struct ceph_mds_session { struct ceph_connection s_con; + struct ceph_authorizer *s_authorizer; + void *s_authorizer_buf, *s_authorizer_reply_buf; + size_t s_authorizer_buf_len, s_authorizer_reply_buf_len; + /* protected by s_cap_lock */ spinlock_t s_cap_lock; u32 s_cap_gen; /* inc each time we get mds stale msg */ diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index d8a6a56a1571..0b1674854b25 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -550,6 +550,27 @@ static void prepare_write_keepalive(struct ceph_connection *con) * Connection negotiation. */ +static void prepare_connect_authorizer(struct ceph_connection *con) +{ + void *auth_buf; + int auth_len = 0; + int auth_protocol = 0; + + if (con->ops->get_authorizer) + con->ops->get_authorizer(con, &auth_buf, &auth_len, + &auth_protocol, &con->auth_reply_buf, + &con->auth_reply_buf_len, + con->auth_retry); + + con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol); + con->out_connect.authorizer_len = cpu_to_le32(auth_len); + + con->out_kvec[con->out_kvec_left].iov_base = auth_buf; + con->out_kvec[con->out_kvec_left].iov_len = auth_len; + con->out_kvec_left++; + con->out_kvec_bytes += auth_len; +} + /* * We connected to a peer and are saying hello. 
*/ @@ -592,6 +613,7 @@ static void prepare_write_connect(struct ceph_messenger *msgr, dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con, con->connect_seq, global_seq, proto); + con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT); con->out_connect.connect_seq = cpu_to_le32(con->connect_seq); con->out_connect.global_seq = cpu_to_le32(global_seq); @@ -611,6 +633,8 @@ static void prepare_write_connect(struct ceph_messenger *msgr, con->out_kvec_cur = con->out_kvec; con->out_more = 0; set_bit(WRITE_PENDING, &con->state); + + prepare_connect_authorizer(con); } @@ -777,6 +801,13 @@ static void prepare_read_connect(struct ceph_connection *con) con->in_base_pos = 0; } +static void prepare_read_connect_retry(struct ceph_connection *con) +{ + dout("prepare_read_connect_retry %p\n", con); + con->in_base_pos = strlen(CEPH_BANNER) + sizeof(con->actual_peer_addr) + + sizeof(con->peer_addr_for_me); +} + static void prepare_read_ack(struct ceph_connection *con) { dout("prepare_read_ack %p\n", con); @@ -853,9 +884,14 @@ static int read_partial_connect(struct ceph_connection *con) ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply); if (ret <= 0) goto out; + ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len), + con->auth_reply_buf); + if (ret <= 0) + goto out; - dout("read_partial_connect %p connect_seq = %u, global_seq = %u\n", - con, le32_to_cpu(con->in_reply.connect_seq), + dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n", + con, (int)con->in_reply.tag, + le32_to_cpu(con->in_reply.connect_seq), le32_to_cpu(con->in_reply.global_seq)); out: return ret; @@ -1051,6 +1087,20 @@ static int process_connect(struct ceph_connection *con) set_bit(CLOSED, &con->state); /* in case there's queued work */ return -1; + case CEPH_MSGR_TAG_BADAUTHORIZER: + con->auth_retry++; + dout("process_connect %p got BADAUTHORIZER attempt %d\n", con, + con->auth_retry); + if (con->auth_retry == 2) { + con->error_msg = "connect authorization failure"; + reset_connection(con); + set_bit(CLOSED, &con->state); + return -1; + } + con->auth_retry = 1; + prepare_write_connect(con->msgr, con, 0); + prepare_read_connect_retry(con); + break; case CEPH_MSGR_TAG_RESETSESSION: /* diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index 4bd85c36308e..f9c9f6487302 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -26,6 +26,12 @@ struct ceph_connection_operations { /* handle an incoming message. 
*/ void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m); + /* authorize an outgoing connection */ + int (*get_authorizer) (struct ceph_connection *con, + void **buf, int *len, int *proto, + void **reply_buf, int *reply_len, int force_new); + int (*verify_authorizer_reply) (struct ceph_connection *con, int len); + /* protocol version mismatch */ void (*bad_proto) (struct ceph_connection *con); @@ -144,6 +150,10 @@ struct ceph_connection { attempt for this connection, client */ u32 peer_global_seq; /* peer's global seq for this connection */ + int auth_retry; /* true if we need a newer authorizer */ + void *auth_reply_buf; /* where to put the authorizer reply */ + int auth_reply_buf_len; + /* out queue */ struct mutex out_mutex; struct list_head out_queue; diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index 95b76e761e18..017d5aef8834 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -6,6 +6,7 @@ #include "mon_client.h" #include "super.h" +#include "auth.h" #include "decode.h" /* @@ -38,6 +39,10 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end) struct ceph_fsid fsid; u32 epoch, num_mon; u16 version; + u32 len; + + ceph_decode_32_safe(&p, end, len, bad); + ceph_decode_need(&p, end, len, bad); dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p)); @@ -95,8 +100,10 @@ static void __close_session(struct ceph_mon_client *monc) { if (monc->con) { dout("__close_session closing mon%d\n", monc->cur_mon); + ceph_con_revoke(monc->con, monc->m_auth); ceph_con_close(monc->con); monc->cur_mon = -1; + ceph_auth_reset(monc->auth); } } @@ -106,6 +113,7 @@ static void __close_session(struct ceph_mon_client *monc) static int __open_session(struct ceph_mon_client *monc) { char r; + int ret; if (monc->cur_mon < 0) { get_random_bytes(&r, 1); @@ -121,6 +129,15 @@ static int __open_session(struct ceph_mon_client *monc) monc->con->peer_name.num = cpu_to_le64(monc->cur_mon); ceph_con_open(monc->con, &monc->monmap->mon_inst[monc->cur_mon].addr); + + /* initiatiate authentication handshake */ + ret = ceph_auth_build_hello(monc->auth, + monc->m_auth->front.iov_base, + monc->m_auth->front_max); + monc->m_auth->front.iov_len = ret; + monc->m_auth->hdr.front_len = cpu_to_le32(ret); + ceph_msg_get(monc->m_auth); /* keep our ref */ + ceph_con_send(monc->con, monc->m_auth); } else { dout("open_session mon%d already open\n", monc->cur_mon); } @@ -139,7 +156,7 @@ static void __schedule_delayed(struct ceph_mon_client *monc) { unsigned delay; - if (monc->cur_mon < 0 || monc->want_mount || __sub_expired(monc)) + if (monc->cur_mon < 0 || __sub_expired(monc)) delay = 10 * HZ; else delay = 20 * HZ; @@ -161,7 +178,7 @@ static void __send_subscribe(struct ceph_mon_client *monc) struct ceph_mon_subscribe_item *i; void *p, *end; - msg = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 64, 0, 0, NULL); + msg = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, 0, 0, NULL); if (!msg) return; @@ -173,7 +190,7 @@ static void __send_subscribe(struct ceph_mon_client *monc) if (monc->want_next_osdmap) { dout("__send_subscribe to 'osdmap' %u\n", (unsigned)monc->have_osdmap); - ceph_encode_32(&p, 2); + ceph_encode_32(&p, 3); ceph_encode_string(&p, end, "osdmap", 6); i = p; i->have = cpu_to_le64(monc->have_osdmap); @@ -181,13 +198,18 @@ static void __send_subscribe(struct ceph_mon_client *monc) p += sizeof(*i); monc->want_next_osdmap = 2; /* requested */ } else { - ceph_encode_32(&p, 1); + ceph_encode_32(&p, 2); } ceph_encode_string(&p, end, "mdsmap", 6); i = p; i->have = cpu_to_le64(monc->have_mdsmap); i->onetime 
= 0; p += sizeof(*i); + ceph_encode_string(&p, end, "monmap", 6); + i = p; + i->have = 0; + i->onetime = 0; + p += sizeof(*i); msg->front.iov_len = p - msg->front.iov_base; msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); @@ -256,7 +278,7 @@ void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc) mutex_unlock(&monc->mutex); } - +#if 0 /* * mount */ @@ -264,12 +286,8 @@ static void __request_mount(struct ceph_mon_client *monc) { struct ceph_msg *msg; struct ceph_client_mount *h; - int err; dout("__request_mount\n"); - err = __open_session(monc); - if (err) - return; msg = ceph_msg_new(CEPH_MSG_CLIENT_MOUNT, sizeof(*h), 0, 0, NULL); if (IS_ERR(msg)) return; @@ -279,8 +297,12 @@ static void __request_mount(struct ceph_mon_client *monc) h->monhdr.session_mon_tid = 0; ceph_con_send(monc->con, msg); } +#endif -int ceph_monc_request_mount(struct ceph_mon_client *monc) +/* + * + */ +int ceph_monc_open_session(struct ceph_mon_client *monc) { if (!monc->con) { monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL); @@ -292,12 +314,14 @@ int ceph_monc_request_mount(struct ceph_mon_client *monc) } mutex_lock(&monc->mutex); - __request_mount(monc); + __open_session(monc); __schedule_delayed(monc); mutex_unlock(&monc->mutex); return 0; } +#if 0 + /* * The monitor responds with mount ack indicate mount success. The * included client ticket allows the client to talk to MDSs and OSDs. @@ -372,9 +396,65 @@ out: mutex_unlock(&monc->mutex); wake_up(&client->mount_wq); } +#endif + +/* + * The monitor responds with mount ack indicate mount success. The + * included client ticket allows the client to talk to MDSs and OSDs. + */ +static void ceph_monc_handle_map(struct ceph_mon_client *monc, struct ceph_msg *msg) +{ + struct ceph_client *client = monc->client; + struct ceph_monmap *monmap = NULL, *old = monc->monmap; + void *p, *end; + + mutex_lock(&monc->mutex); + + dout("handle_monmap\n"); + p = msg->front.iov_base; + end = p + msg->front.iov_len; + + monmap = ceph_monmap_decode(p, end); + if (IS_ERR(monmap)) { + pr_err("problem decoding monmap, %d\n", + (int)PTR_ERR(monmap)); + return; + } + if (monc->have_fsid && + ceph_fsid_compare(&monmap->fsid, &monc->monmap->fsid)) { + print_hex_dump(KERN_ERR, "monmap->fsid: ", DUMP_PREFIX_NONE, 16, 1, + (void *)&monmap->fsid, 16, 0); + print_hex_dump(KERN_ERR, "monc->monmap->fsid: ", DUMP_PREFIX_NONE, 16, 1, + (void *)&monc->monmap->fsid, 16, 0); + + pr_err("fsid mismatch, got a previous map with different fsid"); + kfree(monmap); + return; + } + + client->monc.monmap = monmap; + client->monc.have_fsid = true; + kfree(old); + + mutex_unlock(&monc->mutex); + wake_up(&client->mount_wq); +} + +/* + * init client info after authentication + */ +static void __init_authenticated_client(struct ceph_mon_client *monc) +{ + struct ceph_client *client = monc->client; + client->signed_ticket = NULL; + client->signed_ticket_len = 0; + client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT; + client->msgr->inst.name.num = monc->auth->global_id; + ceph_debugfs_client_init(client); +} /* * statfs @@ -414,12 +494,8 @@ static int send_statfs(struct ceph_mon_client *monc, { struct ceph_msg *msg; struct ceph_mon_statfs *h; - int err; dout("send_statfs tid %llu\n", req->tid); - err = __open_session(monc); - if (err) - return err; msg = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), 0, 0, NULL); if (IS_ERR(msg)) return PTR_ERR(msg); @@ -514,17 +590,14 @@ static void delayed_work(struct work_struct *work) dout("monc delayed_work\n"); mutex_lock(&monc->mutex); - if (monc->want_mount) { - 
__request_mount(monc); + if (monc->hunting) { + __close_session(monc); + __open_session(monc); /* continue hunting */ } else { - if (monc->hunting) { - __close_session(monc); - __open_session(monc); /* continue hunting */ - } else { - ceph_con_keepalive(monc->con); - } + ceph_con_keepalive(monc->con); + if (monc->auth->ops->is_authenticated(monc->auth)) + __send_subscribe(monc); } - __send_subscribe(monc); __schedule_delayed(monc); mutex_unlock(&monc->mutex); } @@ -555,6 +628,7 @@ static int build_initial_monmap(struct ceph_mon_client *monc) monc->monmap->mon_inst[i].name.num = cpu_to_le64(i); } monc->monmap->num_mon = num_mon; + monc->have_fsid = false; /* release addr memory */ kfree(args->mon_addr); @@ -579,21 +653,37 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) monc->con = NULL; + /* authentication */ + monc->auth = ceph_auth_init(cl->mount_args->name, + cl->mount_args->secret); + if (IS_ERR(monc->auth)) + return PTR_ERR(monc->auth); + monc->auth->want_keys = + CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON | + CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS; + /* msg pools */ - err = ceph_msgpool_init(&monc->msgpool_mount_ack, 4096, 1, false); - if (err < 0) - goto out; err = ceph_msgpool_init(&monc->msgpool_subscribe_ack, sizeof(struct ceph_mon_subscribe_ack), 1, false); if (err < 0) - goto out; + goto out_monmap; err = ceph_msgpool_init(&monc->msgpool_statfs_reply, sizeof(struct ceph_mon_statfs_reply), 0, false); if (err < 0) - goto out; + goto out_pool1; + err = ceph_msgpool_init(&monc->msgpool_auth_reply, 4096, 1, false); + if (err < 0) + goto out_pool2; + + monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, 0, 0, NULL); + if (IS_ERR(monc->m_auth)) { + err = PTR_ERR(monc->m_auth); + monc->m_auth = NULL; + goto out_pool3; + } monc->cur_mon = -1; - monc->hunting = false; /* not really */ + monc->hunting = true; monc->sub_renew_after = jiffies; monc->sub_sent = 0; @@ -605,7 +695,16 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) monc->have_mdsmap = 0; monc->have_osdmap = 0; monc->want_next_osdmap = 1; - monc->want_mount = true; + return 0; + +out_pool3: + ceph_msgpool_destroy(&monc->msgpool_auth_reply); +out_pool2: + ceph_msgpool_destroy(&monc->msgpool_subscribe_ack); +out_pool1: + ceph_msgpool_destroy(&monc->msgpool_statfs_reply); +out_monmap: + kfree(monc->monmap); out: return err; } @@ -624,14 +723,44 @@ void ceph_monc_stop(struct ceph_mon_client *monc) } mutex_unlock(&monc->mutex); - ceph_msgpool_destroy(&monc->msgpool_mount_ack); + ceph_auth_destroy(monc->auth); + + ceph_msg_put(monc->m_auth); ceph_msgpool_destroy(&monc->msgpool_subscribe_ack); ceph_msgpool_destroy(&monc->msgpool_statfs_reply); + ceph_msgpool_destroy(&monc->msgpool_auth_reply); kfree(monc->monmap); } +static void handle_auth_reply(struct ceph_mon_client *monc, + struct ceph_msg *msg) +{ + int ret; + + mutex_lock(&monc->mutex); + ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base, + msg->front.iov_len, + monc->m_auth->front.iov_base, + monc->m_auth->front_max); + if (ret < 0) { + monc->client->mount_err = ret; + wake_up(&monc->client->mount_wq); + } else if (ret > 0) { + monc->m_auth->front.iov_len = ret; + monc->m_auth->hdr.front_len = cpu_to_le32(ret); + ceph_msg_get(monc->m_auth); /* keep our ref */ + ceph_con_send(monc->con, monc->m_auth); + } else if (monc->auth->ops->is_authenticated(monc->auth)) { + dout("authenticated, starting session\n"); + __init_authenticated_client(monc); + __send_subscribe(monc); + __resend_statfs(monc); + } + 
mutex_unlock(&monc->mutex); +} + /* * handle incoming message */ @@ -644,8 +773,8 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) return; switch (type) { - case CEPH_MSG_CLIENT_MOUNT_ACK: - handle_mount_ack(monc, msg); + case CEPH_MSG_AUTH_REPLY: + handle_auth_reply(monc, msg); break; case CEPH_MSG_MON_SUBSCRIBE_ACK: @@ -656,6 +785,10 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) handle_statfs_reply(monc, msg); break; + case CEPH_MSG_MON_MAP: + ceph_monc_handle_map(monc, msg); + break; + case CEPH_MSG_MDS_MAP: ceph_mdsc_handle_map(&monc->client->mdsc, msg); break; @@ -682,12 +815,12 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, int front = le32_to_cpu(hdr->front_len); switch (type) { - case CEPH_MSG_CLIENT_MOUNT_ACK: - return ceph_msgpool_get(&monc->msgpool_mount_ack, front); case CEPH_MSG_MON_SUBSCRIBE_ACK: return ceph_msgpool_get(&monc->msgpool_subscribe_ack, front); case CEPH_MSG_STATFS_REPLY: return ceph_msgpool_get(&monc->msgpool_statfs_reply, front); + case CEPH_MSG_AUTH_REPLY: + return ceph_msgpool_get(&monc->msgpool_auth_reply, front); } return ceph_alloc_msg(con, hdr); } @@ -717,10 +850,7 @@ static void mon_fault(struct ceph_connection *con) if (!monc->hunting) { /* start hunting */ monc->hunting = true; - if (__open_session(monc) == 0) { - __send_subscribe(monc); - __resend_statfs(monc); - } + __open_session(monc); } else { /* already hunting, let's wait a bit */ __schedule_delayed(monc); diff --git a/fs/ceph/mon_client.h b/fs/ceph/mon_client.h index 9f6db45bf469..c75b53302ecc 100644 --- a/fs/ceph/mon_client.h +++ b/fs/ceph/mon_client.h @@ -9,6 +9,7 @@ struct ceph_client; struct ceph_mount_args; +struct ceph_auth_client; /* * The monitor map enumerates the set of all monitors. @@ -58,23 +59,26 @@ struct ceph_mon_client { struct mutex mutex; struct delayed_work delayed_work; + struct ceph_auth_client *auth; + struct ceph_msg *m_auth; + bool hunting; int cur_mon; /* last monitor i contacted */ unsigned long sub_sent, sub_renew_after; struct ceph_connection *con; + bool have_fsid; /* msg pools */ - struct ceph_msgpool msgpool_mount_ack; struct ceph_msgpool msgpool_subscribe_ack; struct ceph_msgpool msgpool_statfs_reply; + struct ceph_msgpool msgpool_auth_reply; /* pending statfs requests */ struct radix_tree_root statfs_request_tree; int num_statfs_requests; u64 last_tid; - /* mds/osd map or mount requests */ - bool want_mount; + /* mds/osd map */ int want_next_osdmap; /* 1 = want, 2 = want+asked */ u32 have_osdmap, have_mdsmap; @@ -101,11 +105,11 @@ extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have); extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc); -extern int ceph_monc_request_mount(struct ceph_mon_client *monc); - extern int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf); +extern int ceph_monc_open_session(struct ceph_mon_client *monc); + #endif diff --git a/fs/ceph/msgr.h b/fs/ceph/msgr.h index 8e3ea2eb1bf5..c758e8f8f71b 100644 --- a/fs/ceph/msgr.h +++ b/fs/ceph/msgr.h @@ -21,7 +21,7 @@ * whenever the wire protocol changes. try to keep this string length * constant. 
*/ -#define CEPH_BANNER "ceph v023" +#define CEPH_BANNER "ceph v024" #define CEPH_BANNER_MAX_LEN 30 @@ -46,11 +46,16 @@ struct ceph_entity_name { __le64 num; } __attribute__ ((packed)); -#define CEPH_ENTITY_TYPE_MON 1 -#define CEPH_ENTITY_TYPE_MDS 2 -#define CEPH_ENTITY_TYPE_OSD 3 -#define CEPH_ENTITY_TYPE_CLIENT 4 -#define CEPH_ENTITY_TYPE_ADMIN 5 +#define CEPH_ENTITY_TYPE_MON 0x01 +#define CEPH_ENTITY_TYPE_MDS 0x02 +#define CEPH_ENTITY_TYPE_OSD 0x04 +#define CEPH_ENTITY_TYPE_CLIENT 0x08 +#define CEPH_ENTITY_TYPE_ADMIN 0x10 +#define CEPH_ENTITY_TYPE_AUTH 0x20 + +#define CEPH_ENTITY_TYPE_ANY 0xFF + +extern const char *ceph_entity_type_name(int type); /* * entity_addr -- network address @@ -94,6 +99,7 @@ struct ceph_entity_inst { #define CEPH_MSGR_TAG_ACK 8 /* message ack */ #define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */ #define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */ +#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */ /* @@ -104,6 +110,8 @@ struct ceph_msg_connect { __le32 global_seq; /* count connections initiated by this host */ __le32 connect_seq; /* count connections initiated in this session */ __le32 protocol_version; + __le32 authorizer_protocol; + __le32 authorizer_len; __u8 flags; /* CEPH_MSG_CONNECT_* */ } __attribute__ ((packed)); @@ -112,6 +120,7 @@ struct ceph_msg_connect_reply { __le32 global_seq; __le32 connect_seq; __le32 protocol_version; + __le32 authorizer_len; __u8 flags; } __attribute__ ((packed)); diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 0a16c4f951f9..ca0ee68c322a 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -11,6 +11,7 @@ #include "osd_client.h" #include "messenger.h" #include "decode.h" +#include "auth.h" const static struct ceph_connection_operations osd_con_ops; @@ -331,6 +332,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc) osd->o_con.private = osd; osd->o_con.ops = &osd_con_ops; osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD; + return osd; } @@ -880,9 +882,15 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) /* verify fsid */ ceph_decode_need(&p, end, sizeof(fsid), bad); ceph_decode_copy(&p, &fsid, sizeof(fsid)); - if (ceph_fsid_compare(&fsid, &osdc->client->monc.monmap->fsid)) { - pr_err("got osdmap with wrong fsid, ignoring\n"); - return; + if (osdc->client->monc.have_fsid) { + if (ceph_fsid_compare(&fsid, + &osdc->client->monc.monmap->fsid)) { + pr_err("got osdmap with wrong fsid, ignoring\n"); + return; + } + } else { + ceph_fsid_set(&osdc->client->monc.monmap->fsid, &fsid); + osdc->client->monc.have_fsid = true; } down_write(&osdc->map_sem); @@ -1302,10 +1310,59 @@ static void put_osd_con(struct ceph_connection *con) put_osd(osd); } +/* + * authentication + */ +static int get_authorizer(struct ceph_connection *con, + void **buf, int *len, int *proto, + void **reply_buf, int *reply_len, int force_new) +{ + struct ceph_osd *o = con->private; + struct ceph_osd_client *osdc = o->o_osdc; + struct ceph_auth_client *ac = osdc->client->monc.auth; + int ret = 0; + + if (force_new && o->o_authorizer) { + ac->ops->destroy_authorizer(ac, o->o_authorizer); + o->o_authorizer = NULL; + } + if (o->o_authorizer == NULL) { + ret = ac->ops->create_authorizer( + ac, CEPH_ENTITY_TYPE_OSD, + &o->o_authorizer, + &o->o_authorizer_buf, + &o->o_authorizer_buf_len, + &o->o_authorizer_reply_buf, + &o->o_authorizer_reply_buf_len); + if (ret) + return ret; + } + + *proto = ac->protocol; + *buf = o->o_authorizer_buf; + *len = o->o_authorizer_buf_len; + 
*reply_buf = o->o_authorizer_reply_buf; + *reply_len = o->o_authorizer_reply_buf_len; + return 0; +} + + +static int verify_authorizer_reply(struct ceph_connection *con, int len) +{ + struct ceph_osd *o = con->private; + struct ceph_osd_client *osdc = o->o_osdc; + struct ceph_auth_client *ac = osdc->client->monc.auth; + + return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len); +} + + const static struct ceph_connection_operations osd_con_ops = { .get = get_osd_con, .put = put_osd_con, .dispatch = dispatch, + .get_authorizer = get_authorizer, + .verify_authorizer_reply = verify_authorizer_reply, .alloc_msg = alloc_msg, .fault = osd_reset, .alloc_middle = ceph_alloc_middle, diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h index 766c8dc80aff..3d4ae6595aaa 100644 --- a/fs/ceph/osd_client.h +++ b/fs/ceph/osd_client.h @@ -13,6 +13,7 @@ struct ceph_msg; struct ceph_snap_context; struct ceph_osd_request; struct ceph_osd_client; +struct ceph_authorizer; /* * completion callback for async writepages @@ -29,6 +30,9 @@ struct ceph_osd { struct rb_node o_node; struct ceph_connection o_con; struct list_head o_requests; + struct ceph_authorizer *o_authorizer; + void *o_authorizer_buf, *o_authorizer_reply_buf; + size_t o_authorizer_buf_len, o_authorizer_reply_buf_len; }; /* an in-flight request */ diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h index fb23ff9297c9..12bfb2f7c275 100644 --- a/fs/ceph/rados.h +++ b/fs/ceph/rados.h @@ -157,7 +157,6 @@ struct ceph_eversion { #define CEPH_OSD_OP_MODE_WR 0x2000 #define CEPH_OSD_OP_MODE_RMW 0x3000 #define CEPH_OSD_OP_MODE_SUB 0x4000 -#define CEPH_OSD_OP_MODE_EXEC 0x8000 #define CEPH_OSD_OP_TYPE 0x0f00 #define CEPH_OSD_OP_TYPE_LOCK 0x0100 @@ -285,6 +284,7 @@ enum { CEPH_OSD_FLAG_BALANCE_READS = 256, CEPH_OSD_FLAG_PARALLELEXEC = 512, /* execute op in parallel */ CEPH_OSD_FLAG_PGOP = 1024, /* pg op, no object */ + CEPH_OSD_FLAG_EXEC = 2048, /* op may exec */ }; enum { diff --git a/fs/ceph/super.c b/fs/ceph/super.c index fe0a5962a082..c901395ae8a1 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -128,6 +128,8 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt) seq_puts(m, ",noasyncreaddir"); if (strcmp(args->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT)) seq_printf(m, ",snapdirname=%s", args->snapdir_name); + if (args->name) + seq_printf(m, ",name=%s", args->name); if (args->secret) seq_puts(m, ",secret="); return 0; @@ -224,12 +226,12 @@ const char *ceph_msg_type_name(int type) switch (type) { case CEPH_MSG_SHUTDOWN: return "shutdown"; case CEPH_MSG_PING: return "ping"; + case CEPH_MSG_AUTH: return "auth"; + case CEPH_MSG_AUTH_REPLY: return "auth_reply"; case CEPH_MSG_MON_MAP: return "mon_map"; case CEPH_MSG_MON_GET_MAP: return "mon_get_map"; case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe"; case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack"; - case CEPH_MSG_CLIENT_MOUNT: return "client_mount"; - case CEPH_MSG_CLIENT_MOUNT_ACK: return "client_mount_ack"; case CEPH_MSG_STATFS: return "statfs"; case CEPH_MSG_STATFS_REPLY: return "statfs_reply"; case CEPH_MSG_MDS_MAP: return "mds_map"; @@ -267,6 +269,7 @@ enum { Opt_last_int, /* int args above */ Opt_snapdirname, + Opt_name, Opt_secret, Opt_last_string, /* string args above */ @@ -293,6 +296,7 @@ static match_table_t arg_tokens = { {Opt_readdir_max_entries, "readdir_max_entries=%d"}, /* int args above */ {Opt_snapdirname, "snapdirname=%s"}, + {Opt_name, "name=%s"}, {Opt_secret, "secret=%s"}, /* string args above */ {Opt_ip, "ip=%s"}, @@ -407,6 +411,11 @@ static struct 
ceph_mount_args *parse_mount_args(int flags, char *options, argstr[0].to-argstr[0].from, GFP_KERNEL); break; + case Opt_name: + args->name = kstrndup(argstr[0].from, + argstr[0].to-argstr[0].from, + GFP_KERNEL); + break; case Opt_secret: args->secret = kstrndup(argstr[0].from, argstr[0].to-argstr[0].from, @@ -476,6 +485,8 @@ static void destroy_mount_args(struct ceph_mount_args *args) dout("destroy_mount_args %p\n", args); kfree(args->snapdir_name); args->snapdir_name = NULL; + kfree(args->name); + args->name = NULL; kfree(args->secret); args->secret = NULL; kfree(args); @@ -657,27 +668,23 @@ static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt, client->msgr->nocrc = ceph_test_opt(client, NOCRC); } - /* send mount request, and wait for mon, mds, and osd maps */ - err = ceph_monc_request_mount(&client->monc); + /* open session, and wait for mon, mds, and osd maps */ + err = ceph_monc_open_session(&client->monc); if (err < 0) goto out; - while (!have_mon_map(client) && !client->mount_err) { + while (!have_mon_map(client)) { err = -EIO; if (timeout && time_after_eq(jiffies, started + timeout)) goto out; /* wait */ - dout("mount waiting for mount\n"); - err = wait_event_interruptible_timeout(client->mount_wq, - client->mount_err || have_mon_map(client), + dout("mount waiting for mon_map\n"); + err = wait_event_interruptible_timeout(client->mount_wq, /* FIXME */ + have_mon_map(client), timeout); if (err == -EINTR || err == -ERESTARTSYS) goto out; - if (client->mount_err) { - err = client->mount_err; - goto out; - } } dout("mount opening root\n"); @@ -795,7 +802,6 @@ static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client) client->backing_dev_info.ra_pages = (client->mount_args->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_SHIFT; - err = bdi_register_dev(&client->backing_dev_info, sb->s_dev); return err; } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 8aa1ffba6c0d..e0e8130959b6 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -61,6 +61,7 @@ struct ceph_mount_args { int max_readdir; /* max readdir size */ int osd_timeout; char *snapdir_name; /* default ".snap" */ + char *name; char *secret; int cap_release_safety; }; @@ -75,6 +76,7 @@ struct ceph_mount_args { #define CEPH_MSG_MAX_DATA_LEN (16*1024*1024) #define CEPH_SNAPDIRNAME_DEFAULT ".snap" +#define CEPH_AUTH_NAME_DEFAULT "guest" /* * Delay telling the MDS we no longer want caps, in case we reopen -- cgit v1.2.3 From b9bfb93ce2b1ef668254f0b9e16fcc5246d65d8e Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 18 Nov 2009 15:08:44 -0800 Subject: ceph: move mempool creation to ceph_create_client Signed-off-by: Sage Weil --- fs/ceph/super.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/fs/ceph/super.c b/fs/ceph/super.c index c901395ae8a1..df05617aca86 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -534,10 +534,18 @@ static struct ceph_client *ceph_create_client(struct ceph_mount_args *args) if (client->trunc_wq == NULL) goto fail_pg_inv_wq; + /* set up mempools */ + err = -ENOMEM; + client->wb_pagevec_pool = mempool_create_kmalloc_pool(10, + client->mount_args->wsize >> PAGE_CACHE_SHIFT); + if (!client->wb_pagevec_pool) + goto fail_trunc_wq; + + /* subsystems */ err = ceph_monc_init(&client->monc, client); if (err < 0) - goto fail_trunc_wq; + goto fail_mempool; err = ceph_osdc_init(&client->osdc, client); if (err < 0) goto fail_monc; @@ -550,6 +558,8 @@ fail_osdc: ceph_osdc_stop(&client->osdc); fail_monc: ceph_monc_stop(&client->monc); +fail_mempool: 
+ mempool_destroy(client->wb_pagevec_pool); fail_trunc_wq: destroy_workqueue(client->trunc_wq); fail_pg_inv_wq: @@ -581,8 +591,7 @@ static void ceph_destroy_client(struct ceph_client *client) if (client->msgr) ceph_messenger_destroy(client->msgr); - if (client->wb_pagevec_pool) - mempool_destroy(client->wb_pagevec_pool); + mempool_destroy(client->wb_pagevec_pool); destroy_mount_args(client->mount_args); @@ -845,14 +854,6 @@ static int ceph_get_sb(struct file_system_type *fs_type, dout("get_sb got existing client %p\n", client); } else { dout("get_sb using new client %p\n", client); - - /* set up mempools */ - err = -ENOMEM; - client->wb_pagevec_pool = mempool_create_kmalloc_pool(10, - client->mount_args->wsize >> PAGE_CACHE_SHIFT); - if (!client->wb_pagevec_pool) - goto out_splat; - err = ceph_register_bdi(sb, client); if (err < 0) goto out_splat; -- cgit v1.2.3 From cfea1cf42b614583c02727d5bffd5a2384e92bda Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 18 Nov 2009 16:50:55 -0800 Subject: ceph: small cleanup in hash function Signed-off-by: Sage Weil --- fs/ceph/ceph_hash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ceph/ceph_hash.c b/fs/ceph/ceph_hash.c index ac8be54631fe..1c44e43fe89e 100644 --- a/fs/ceph/ceph_hash.c +++ b/fs/ceph/ceph_hash.c @@ -85,7 +85,7 @@ unsigned ceph_str_hash_linux(const char *str, unsigned length) unsigned long hash = 0; unsigned char c; - while (length-- > 0) { + while (length--) { c = *str++; hash = (hash + (c << 4) + (c >> 4)) * 11; } -- cgit v1.2.3 From 0743304d871559cb4c7c066357de2caa60e94c2f Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 18 Nov 2009 16:50:41 -0800 Subject: ceph: fix debugfs entry, simplify fsid checks We may first learn our fsid from any of the mon, osd, or mds maps (whichever the monitor sends first). Consolidate checks in a single helper. Initialize the client debugfs entry then, since we need the fsid (and global_id) for the directory name. Also remove dead mount code. 
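In short, the fsid handling collapses into a single learn-once, verify-afterwards helper; a condensed view of the ceph_check_fsid() added below (the real version also hex-dumps both fsids on a mismatch and logs the client id when the fsid is first learned):

    int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
    {
            if (client->have_fsid)
                    /* already learned; every later map must agree */
                    return ceph_fsid_compare(&client->fsid, fsid) ? -1 : 0;

            /* first map seen from any server: adopt its fsid ... */
            memcpy(&client->fsid, fsid, sizeof(*fsid));
            client->have_fsid = true;
            /* ... and only now create the debugfs dir, which is named by fsid */
            ceph_debugfs_client_init(client);
            return 0;
    }

The mon, mds, and osd map handlers then reduce to a single
"if (ceph_check_fsid(client, &fsid) < 0) return;" check.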
Signed-off-by: Sage Weil --- fs/ceph/debugfs.c | 4 +- fs/ceph/mds_client.c | 12 +----- fs/ceph/mon_client.c | 113 ++++----------------------------------------------- fs/ceph/osd_client.c | 12 +----- fs/ceph/super.c | 32 ++++++++++++--- fs/ceph/super.h | 18 ++++---- 6 files changed, 51 insertions(+), 140 deletions(-) diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index 9b2020696680..b90fc3e1ff70 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -7,6 +7,8 @@ #include "super.h" #include "mds_client.h" +#include "mon_client.h" +#include "auth.h" #ifdef CONFIG_DEBUG_FS @@ -335,7 +337,7 @@ int ceph_debugfs_client_init(struct ceph_client *client) char name[80]; snprintf(name, sizeof(name), FSID_FORMAT ".client%lld", - PR_FSID(&client->monc.monmap->fsid), client->whoami); + PR_FSID(&client->fsid), client->monc.auth->global_id); client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir); if (!client->debugfs_dir) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 8a285158aecc..8d95b0f051e4 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2782,16 +2782,8 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); ceph_decode_copy(&p, &fsid, sizeof(fsid)); - if (mdsc->client->monc.have_fsid) { - if (ceph_fsid_compare(&fsid, - &mdsc->client->monc.monmap->fsid)) { - pr_err("got mdsmap with wrong fsid\n"); - return; - } - } else { - ceph_fsid_set(&mdsc->client->monc.monmap->fsid, &fsid); - mdsc->client->monc.have_fsid = true; - } + if (ceph_check_fsid(mdsc->client, &fsid) < 0) + return; epoch = ceph_decode_32(&p); maplen = ceph_decode_32(&p); dout("handle_map epoch %u len %d\n", epoch, (int)maplen); diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index 017d5aef8834..b742b3b3e0f3 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -320,89 +320,12 @@ int ceph_monc_open_session(struct ceph_mon_client *monc) return 0; } -#if 0 - -/* - * The monitor responds with mount ack indicate mount success. The - * included client ticket allows the client to talk to MDSs and OSDs. 
- */ -static void handle_mount_ack(struct ceph_mon_client *monc, struct ceph_msg *msg) -{ - struct ceph_client *client = monc->client; - struct ceph_monmap *monmap = NULL, *old = monc->monmap; - void *p, *end; - s32 result; - u32 len; - s64 cnum; - int err = -EINVAL; - - if (client->whoami >= 0) { - dout("handle_mount_ack - already mounted\n"); - return; - } - - mutex_lock(&monc->mutex); - - dout("handle_mount_ack\n"); - p = msg->front.iov_base; - end = p + msg->front.iov_len; - - ceph_decode_64_safe(&p, end, cnum, bad); - ceph_decode_32_safe(&p, end, result, bad); - ceph_decode_32_safe(&p, end, len, bad); - if (result) { - pr_err("mount denied: %.*s (%d)\n", len, (char *)p, - result); - err = result; - goto out; - } - p += len; - - ceph_decode_32_safe(&p, end, len, bad); - ceph_decode_need(&p, end, len, bad); - monmap = ceph_monmap_decode(p, p + len); - if (IS_ERR(monmap)) { - pr_err("problem decoding monmap, %d\n", - (int)PTR_ERR(monmap)); - err = -EINVAL; - goto out; - } - p += len; - - client->monc.monmap = monmap; - kfree(old); - - client->signed_ticket = NULL; - client->signed_ticket_len = 0; - - monc->want_mount = false; - - client->whoami = cnum; - client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT; - client->msgr->inst.name.num = cpu_to_le64(cnum); - pr_info("client%lld fsid " FSID_FORMAT "\n", - client->whoami, PR_FSID(&client->monc.monmap->fsid)); - - ceph_debugfs_client_init(client); - __send_subscribe(monc); - - err = 0; - goto out; - -bad: - pr_err("error decoding mount_ack message\n"); -out: - client->mount_err = err; - mutex_unlock(&monc->mutex); - wake_up(&client->mount_wq); -} -#endif - /* * The monitor responds with mount ack indicate mount success. The * included client ticket allows the client to talk to MDSs and OSDs. */ -static void ceph_monc_handle_map(struct ceph_mon_client *monc, struct ceph_msg *msg) +static void ceph_monc_handle_map(struct ceph_mon_client *monc, + struct ceph_msg *msg) { struct ceph_client *client = monc->client; struct ceph_monmap *monmap = NULL, *old = monc->monmap; @@ -420,42 +343,19 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc, struct ceph_msg * (int)PTR_ERR(monmap)); return; } - if (monc->have_fsid && - ceph_fsid_compare(&monmap->fsid, &monc->monmap->fsid)) { - print_hex_dump(KERN_ERR, "monmap->fsid: ", DUMP_PREFIX_NONE, 16, 1, - (void *)&monmap->fsid, 16, 0); - print_hex_dump(KERN_ERR, "monc->monmap->fsid: ", DUMP_PREFIX_NONE, 16, 1, - (void *)&monc->monmap->fsid, 16, 0); - - pr_err("fsid mismatch, got a previous map with different fsid"); + + if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) { kfree(monmap); return; } client->monc.monmap = monmap; - client->monc.have_fsid = true; kfree(old); mutex_unlock(&monc->mutex); wake_up(&client->mount_wq); } - -/* - * init client info after authentication - */ -static void __init_authenticated_client(struct ceph_mon_client *monc) -{ - struct ceph_client *client = monc->client; - - client->signed_ticket = NULL; - client->signed_ticket_len = 0; - client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT; - client->msgr->inst.name.num = monc->auth->global_id; - - ceph_debugfs_client_init(client); -} - /* * statfs */ @@ -754,7 +654,10 @@ static void handle_auth_reply(struct ceph_mon_client *monc, ceph_con_send(monc->con, monc->m_auth); } else if (monc->auth->ops->is_authenticated(monc->auth)) { dout("authenticated, starting session\n"); - __init_authenticated_client(monc); + + monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT; + monc->client->msgr->inst.name.num = 
monc->auth->global_id; + __send_subscribe(monc); __resend_statfs(monc); } diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index ca0ee68c322a..d63f192999ee 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -882,16 +882,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) /* verify fsid */ ceph_decode_need(&p, end, sizeof(fsid), bad); ceph_decode_copy(&p, &fsid, sizeof(fsid)); - if (osdc->client->monc.have_fsid) { - if (ceph_fsid_compare(&fsid, - &osdc->client->monc.monmap->fsid)) { - pr_err("got osdmap with wrong fsid, ignoring\n"); - return; - } - } else { - ceph_fsid_set(&osdc->client->monc.monmap->fsid, &fsid); - osdc->client->monc.have_fsid = true; - } + if (ceph_check_fsid(osdc->client, &fsid) < 0) + return; down_write(&osdc->map_sem); diff --git a/fs/ceph/super.c b/fs/ceph/super.c index df05617aca86..3df6d4ab236c 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -19,6 +19,7 @@ #include "decode.h" #include "super.h" #include "mon_client.h" +#include "auth.h" /* * Ceph superblock operations @@ -510,14 +511,11 @@ static struct ceph_client *ceph_create_client(struct ceph_mount_args *args) client->sb = NULL; client->mount_state = CEPH_MOUNT_MOUNTING; - client->whoami = -1; client->mount_args = args; client->msgr = NULL; client->mount_err = 0; - client->signed_ticket = NULL; - client->signed_ticket_len = 0; err = bdi_init(&client->backing_dev_info); if (err < 0) @@ -582,8 +580,6 @@ static void ceph_destroy_client(struct ceph_client *client) ceph_monc_stop(&client->monc); ceph_osdc_stop(&client->osdc); - kfree(client->signed_ticket); - ceph_debugfs_client_cleanup(client); destroy_workqueue(client->wb_wq); destroy_workqueue(client->pg_inv_wq); @@ -599,6 +595,32 @@ static void ceph_destroy_client(struct ceph_client *client) dout("destroy_client %p done\n", client); } +/* + * Initially learn our fsid, or verify an fsid matches. + */ +int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid) +{ + if (client->have_fsid) { + if (ceph_fsid_compare(&client->fsid, fsid)) { + print_hex_dump(KERN_ERR, "this fsid: ", + DUMP_PREFIX_NONE, 16, 1, + (void *)fsid, 16, 0); + print_hex_dump(KERN_ERR, " old fsid: ", + DUMP_PREFIX_NONE, 16, 1, + (void *)&client->fsid, 16, 0); + pr_err("fsid mismatch\n"); + return -1; + } + } else { + pr_info("client%lld fsid " FSID_FORMAT "\n", + client->monc.auth->global_id, PR_FSID(fsid)); + memcpy(&client->fsid, fsid, sizeof(*fsid)); + ceph_debugfs_client_init(client); + client->have_fsid = true; + } + return 0; +} + /* * true if we have the mon map (and have thus joined the cluster) */ diff --git a/fs/ceph/super.h b/fs/ceph/super.h index e0e8130959b6..de5e32414978 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -113,16 +113,11 @@ static inline unsigned long time_sub(unsigned long a, unsigned long b) * mounting the same ceph filesystem/cluster. 
*/ struct ceph_client { - __s64 whoami; /* my client number */ -#ifdef CONFIG_DEBUG_FS - struct dentry *debugfs_monmap; - struct dentry *debugfs_mdsmap, *debugfs_osdmap; - struct dentry *debugfs_dir, *debugfs_dentry_lru, *debugfs_caps; -#endif + struct ceph_fsid fsid; + bool have_fsid; struct mutex mount_mutex; /* serialize mount attempts */ struct ceph_mount_args *mount_args; - struct ceph_fsid fsid; struct super_block *sb; @@ -130,8 +125,6 @@ struct ceph_client { wait_queue_head_t mount_wq; int mount_err; - void *signed_ticket; /* our keys to the kingdom */ - int signed_ticket_len; struct ceph_messenger *msgr; /* messenger instance */ struct ceph_mon_client monc; @@ -145,6 +138,12 @@ struct ceph_client { struct workqueue_struct *trunc_wq; struct backing_dev_info backing_dev_info; + +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_monmap; + struct dentry *debugfs_mdsmap, *debugfs_osdmap; + struct dentry *debugfs_dir, *debugfs_dentry_lru, *debugfs_caps; +#endif }; static inline struct ceph_client *ceph_client(struct super_block *sb) @@ -735,6 +734,7 @@ extern struct kmem_cache *ceph_dentry_cachep; extern struct kmem_cache *ceph_file_cachep; extern const char *ceph_msg_type_name(int type); +extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid); #define FSID_FORMAT "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-" \ "%02x%02x%02x%02x%02x%02x" -- cgit v1.2.3 From 94045e115ee72aee3b17295791da07078f2f778c Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 19 Nov 2009 15:31:50 -0800 Subject: ceph: decode updated mdsmap format The mds map now uses the global_id as the 'key' (instead of the addr, which was a poor choice). This is protocol change. Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.h | 2 +- fs/ceph/mdsmap.c | 15 +++++++++------ fs/ceph/mdsmap.h | 1 + 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index 1e96a9a87d8d..4e5f49c738d8 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -39,7 +39,7 @@ #define CEPH_MDS_PROTOCOL 9 /* cluster internal */ #define CEPH_MON_PROTOCOL 5 /* cluster internal */ #define CEPH_OSDC_PROTOCOL 22 /* server/client */ -#define CEPH_MDSC_PROTOCOL 29 /* server/client */ +#define CEPH_MDSC_PROTOCOL 30 /* server/client */ #define CEPH_MONC_PROTOCOL 15 /* server/client */ diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c index 4226c810ce22..cad8d25861e5 100644 --- a/fs/ceph/mdsmap.c +++ b/fs/ceph/mdsmap.c @@ -76,6 +76,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) /* pick out active nodes from mds_info (state > 0) */ n = ceph_decode_32(p); for (i = 0; i < n; i++) { + u64 global_id; u32 namelen; s32 mds, inc, state; u64 state_seq; @@ -84,10 +85,10 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) u32 num_export_targets; void *pexport_targets = NULL; - ceph_decode_need(p, end, sizeof(addr) + 1 + sizeof(u32), bad); - ceph_decode_copy(p, &addr, sizeof(addr)); - ceph_decode_addr(&addr); + ceph_decode_need(p, end, sizeof(u64)*2 + 1 + sizeof(u32), bad); + global_id = ceph_decode_64(p); infoversion = ceph_decode_8(p); + *p += sizeof(u64); namelen = ceph_decode_32(p); /* skip mds name */ *p += namelen; @@ -99,7 +100,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) inc = ceph_decode_32(p); state = ceph_decode_32(p); state_seq = ceph_decode_64(p); - *p += sizeof(addr); + ceph_decode_copy(p, &addr, sizeof(addr)); + ceph_decode_addr(&addr); *p += sizeof(struct ceph_timespec); *p += sizeof(u32); ceph_decode_32_safe(p, end, namelen, bad); @@ -112,10 
+114,11 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) num_export_targets = 0; } - dout("mdsmap_decode %d/%d mds%d.%d %s %s\n", - i+1, n, mds, inc, pr_addr(&addr.in_addr), + dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n", + i+1, n, global_id, mds, inc, pr_addr(&addr.in_addr), ceph_mds_state_name(state)); if (mds >= 0 && mds < m->m_max_mds && state > 0) { + m->m_info[mds].global_id = global_id; m->m_info[mds].state = state; m->m_info[mds].addr = addr; m->m_info[mds].num_export_targets = num_export_targets; diff --git a/fs/ceph/mdsmap.h b/fs/ceph/mdsmap.h index d317308648fb..eacc131aa5cb 100644 --- a/fs/ceph/mdsmap.h +++ b/fs/ceph/mdsmap.h @@ -9,6 +9,7 @@ * we limit fields to those the client actually xcares about */ struct ceph_mds_info { + u64 global_id; struct ceph_entity_addr addr; s32 state; int num_export_targets; -- cgit v1.2.3 From dc14657c9c946f25b84a98e9ffa41b812a70699e Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Fri, 20 Nov 2009 13:59:13 -0800 Subject: ceph: mount fails immediately on error Signed-off-by: Yehuda Sadeh --- fs/ceph/auth.c | 5 +++++ fs/ceph/super.c | 6 +++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/fs/ceph/auth.c b/fs/ceph/auth.c index c4d1eee827a3..32f2e2a021ab 100644 --- a/fs/ceph/auth.c +++ b/fs/ceph/auth.c @@ -169,6 +169,11 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac, } if (ac->negotiating) { + /* server does not support our protocols? */ + if (!protocol && result < 0) { + ret = result; + goto out; + } /* set up (new) protocol handler? */ if (ac->protocol && ac->protocol != protocol) { ac->ops->destroy(ac); diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 3df6d4ab236c..a828943296c5 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -712,10 +712,14 @@ static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt, /* wait */ dout("mount waiting for mon_map\n"); err = wait_event_interruptible_timeout(client->mount_wq, /* FIXME */ - have_mon_map(client), + have_mon_map(client) || (client->mount_err < 0), timeout); if (err == -EINTR || err == -ERESTARTSYS) goto out; + if (client->mount_err < 0) { + err = client->mount_err; + goto out; + } } dout("mount opening root\n"); -- cgit v1.2.3 From 0dc2570fab222affe7739b88b5ed04c511d433dc Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 20 Nov 2009 13:43:45 -0800 Subject: ceph: reset requested max_size after mds reconnect The max_size increase request to the MDS can get lost during an MDS restart and reconnect. Reset our requested value after the MDS recovers, so that any blocked writes will re-request a larger max_size upon waking. Also, explicit wake session caps after the reconnect. Normally the cap renewal catches this, but not in the cases where the caps didn't go stale in the first place, which would leave writers waiting on max_size asleep. 
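Concretely, the wake-up is a per-cap callback iterated over the session's caps, with the reconnect case folded in through the iterator's void *arg (essentially the code added below):

    static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
                                  void *arg)
    {
            struct ceph_inode_info *ci = ceph_inode(inode);

            wake_up(&ci->i_cap_wq);
            if (arg) {
                    /* reconnect: forget the size we already asked for, so a
                     * blocked writer re-requests max_size from the new MDS */
                    spin_lock(&inode->i_lock);
                    ci->i_wanted_max_size = 0;
                    ci->i_requested_max_size = 0;
                    spin_unlock(&inode->i_lock);
            }
            return 0;
    }

wake_up_session_caps(session, 1) is called from check_new_map() once the MDS
reports the reconnect complete; the cap renewal path keeps passing 0 and only
wakes the waiters.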
Signed-off-by: Sage Weil --- fs/ceph/mds_client.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 8d95b0f051e4..7da836909abb 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -746,14 +746,24 @@ static void remove_session_caps(struct ceph_mds_session *session) static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap, void *arg) { - wake_up(&ceph_inode(inode)->i_cap_wq); + struct ceph_inode_info *ci = ceph_inode(inode); + + wake_up(&ci->i_cap_wq); + if (arg) { + spin_lock(&inode->i_lock); + ci->i_wanted_max_size = 0; + ci->i_requested_max_size = 0; + spin_unlock(&inode->i_lock); + } return 0; } -static void wake_up_session_caps(struct ceph_mds_session *session) +static void wake_up_session_caps(struct ceph_mds_session *session, + int reconnect) { dout("wake_up_session_caps %p mds%d\n", session, session->s_mds); - iterate_session_caps(session, wake_up_session_cb, NULL); + iterate_session_caps(session, wake_up_session_cb, + (void *)(unsigned long)reconnect); } /* @@ -794,6 +804,8 @@ static int send_renew_caps(struct ceph_mds_client *mdsc, /* * Note new cap ttl, and any transition from stale -> not stale (fresh?). + * + * Called under session->s_mutex */ static void renewed_caps(struct ceph_mds_client *mdsc, struct ceph_mds_session *session, int is_renew) @@ -822,7 +834,7 @@ static void renewed_caps(struct ceph_mds_client *mdsc, spin_unlock(&session->s_cap_lock); if (wake) - wake_up_session_caps(session); + wake_up_session_caps(session, 0); } /* @@ -2248,6 +2260,7 @@ static void check_new_map(struct ceph_mds_client *mdsc, pr_info("mds%d reconnect completed\n", s->s_mds); kick_requests(mdsc, i, 1); ceph_kick_flushing_caps(mdsc, s); + wake_up_session_caps(s, 1); } } } -- cgit v1.2.3 From 03c677e1d189ff62891d9f278c55bb798a418b81 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 20 Nov 2009 15:14:15 -0800 Subject: ceph: reset msgr backoff during open, not after successful handshake Reset the backoff delay when we reopen the connection, so that the delays for any initial connection problems are reasonable. We were resetting only after a successful handshake, which was of limited utility. Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 0b1674854b25..45cec31fdf5e 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -348,6 +348,7 @@ void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr) set_bit(OPENING, &con->state); clear_bit(CLOSED, &con->state); memcpy(&con->peer_addr, addr, sizeof(*addr)); + con->delay = 0; /* reset backoff memory */ queue_con(con); } @@ -1162,8 +1163,6 @@ static int process_connect(struct ceph_connection *con) con->connect_seq); WARN_ON(con->connect_seq != le32_to_cpu(con->in_reply.connect_seq)); - - con->delay = 0; /* reset backoff memory */ prepare_read_tag(con); break; -- cgit v1.2.3 From b19a29af74c09553b9fef95cdf6e9af3df65f544 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 20 Nov 2009 14:44:18 -0800 Subject: ceph: remove dead code Left over from mount/auth protocol changes. 
Signed-off-by: Sage Weil --- fs/ceph/mon_client.c | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index b742b3b3e0f3..9ff2da69d33a 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -278,27 +278,6 @@ void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc) mutex_unlock(&monc->mutex); } -#if 0 -/* - * mount - */ -static void __request_mount(struct ceph_mon_client *monc) -{ - struct ceph_msg *msg; - struct ceph_client_mount *h; - - dout("__request_mount\n"); - msg = ceph_msg_new(CEPH_MSG_CLIENT_MOUNT, sizeof(*h), 0, 0, NULL); - if (IS_ERR(msg)) - return; - h = msg->front.iov_base; - h->monhdr.have_version = 0; - h->monhdr.session_mon = cpu_to_le16(-1); - h->monhdr.session_mon_tid = 0; - ceph_con_send(monc->con, msg); -} -#endif - /* * */ -- cgit v1.2.3 From 60d877334f7d9f5f2417ea4a83c1def769286102 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 21 Nov 2009 12:53:08 +0100 Subject: fs/ceph: introduce missing kfree Error handling code following a kmalloc should free the allocated data. The semantic match that finds this problem is as follows: (http://www.emn.fr/x-info/coccinelle/) // @r exists@ local idexpression x; statement S; expression E; identifier f,f1,l; position p1,p2; expression *ptr != NULL; @@ x@p1 = \(kmalloc\|kzalloc\|kcalloc\)(...); ... if (x == NULL) S <... when != x when != if (...) { <+...x...+> } ( x->f1 = E | (x->f1 == NULL || ...) | f(...,x->f1,...) ) ...> ( return \(0\|<+...x...+>\|ptr\); | return@p2 ...; ) @script:python@ p1 << r.p1; p2 << r.p2; @@ print "* file: %s kmalloc %s return %s" % (p1[0].file,p1[0].line,p2[0].line) // Signed-off-by: Julia Lawall Signed-off-by: Sage Weil --- fs/ceph/xattr.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 1a48a55c5109..04769a3ab832 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -655,8 +655,10 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name, /* do request */ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR, USE_AUTH_MDS); - if (IS_ERR(req)) - return PTR_ERR(req); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out; + } req->r_inode = igrab(inode); req->r_inode_drop = CEPH_CAP_XATTR_SHARED; req->r_num_caps = 1; -- cgit v1.2.3 From 32c895e776a0dd2cb701d60fbd6440280c09ce35 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 21 Nov 2009 16:53:16 +0100 Subject: fs/ceph: Move a dereference below a NULL test If the NULL test is necessary, then the dereference should be moved below the NULL test. The semantic patch that makes this change is as follows: (http://coccinelle.lip6.fr/). // @@ type T; expression E; identifier i,fld; statement S; @@ - T i = E->fld; + T i; ... 
when != E when != i if (E == NULL) S + i = E->fld; // Signed-off-by: Julia Lawall Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index d63f192999ee..5d30d5959b97 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -1249,11 +1249,12 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) { struct ceph_osd *osd = con->private; - struct ceph_osd_client *osdc = osd->o_osdc; + struct ceph_osd_client *osdc; int type = le16_to_cpu(msg->hdr.type); if (!osd) return; + osdc = osd->o_osdc; switch (type) { case CEPH_MSG_OSD_MAP: -- cgit v1.2.3 From 75eb3592811028e5b01835126483d115532a3aa1 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Sat, 21 Nov 2009 13:08:14 -0800 Subject: ceph: remove useless IS_ERR checks ceph_lookup_snap_realm either returns a valid pointer or NULL; there is no need to check IS_ERR(result). Reported-by: Julia Lawall Signed-off-by: Sage Weil --- fs/ceph/snap.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 2e3cb40b7e48..52f46a1208f5 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -226,8 +226,6 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc, return 0; parent = ceph_lookup_snap_realm(mdsc, parentino); - if (IS_ERR(parent)) - return PTR_ERR(parent); if (!parent) { parent = ceph_create_snap_realm(mdsc, parentino); if (IS_ERR(parent)) @@ -541,10 +539,6 @@ more: p += sizeof(u64) * le32_to_cpu(ri->num_prior_parent_snaps); realm = ceph_lookup_snap_realm(mdsc, le64_to_cpu(ri->ino)); - if (IS_ERR(realm)) { - err = PTR_ERR(realm); - goto fail; - } if (!realm) { realm = ceph_create_snap_realm(mdsc, le64_to_cpu(ri->ino)); if (IS_ERR(realm)) { @@ -762,8 +756,6 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc, ri = p; realm = ceph_lookup_snap_realm(mdsc, split); - if (IS_ERR(realm)) - goto out; if (!realm) { realm = ceph_create_snap_realm(mdsc, split); if (IS_ERR(realm)) @@ -829,8 +821,6 @@ skip_inode: struct ceph_snap_realm *child = ceph_lookup_snap_realm(mdsc, le64_to_cpu(split_realms[i])); - if (IS_ERR(child)) - continue; if (!child) continue; adjust_snap_realm_parent(mdsc, child, realm->ino); -- cgit v1.2.3 From 34b43a56b9b103a7a820032177131532d9dbdbe8 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 1 Dec 2009 12:23:54 -0800 Subject: ceph: plug leak of request_mutex Fix leak of osd client request_mutex on receiving dup ack. 
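"Leak" here means a missed unlock, not lost memory: handle_reply() takes osdc->request_mutex while matching the reply to the pending request for its tid, and the duplicate-ack branch jumped to done without dropping it, so the next caller would block on the mutex indefinitely. The one-line fix below restores the lock/unlock pairing on that early-exit path, roughly:

    } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
            dout("handle_reply tid %llu dup ack\n", tid);
            mutex_unlock(&osdc->request_mutex);   /* the previously missing unlock */
            goto done;
    }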
Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 5d30d5959b97..d600073f1d3f 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -739,6 +739,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg) req->r_got_reply = 1; } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) { dout("handle_reply tid %llu dup ack\n", tid); + mutex_unlock(&osdc->request_mutex); goto done; } -- cgit v1.2.3 From 50b885b96c903e420a1eac54dd27626244704a06 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 1 Dec 2009 14:12:07 -0800 Subject: ceph: whitespace cleanup Signed-off-by: Sage Weil --- fs/ceph/caps.c | 2 +- fs/ceph/ceph_hash.c | 8 ++++---- fs/ceph/crush/mapper.c | 2 +- fs/ceph/mon_client.c | 2 +- fs/ceph/osd_client.c | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 9dd110602cda..9b9ce143ac1f 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1316,7 +1316,7 @@ static int __mark_caps_flushing(struct inode *inode, struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc; struct ceph_inode_info *ci = ceph_inode(inode); int flushing; - + BUG_ON(ci->i_dirty_caps == 0); BUG_ON(list_empty(&ci->i_dirty_item)); diff --git a/fs/ceph/ceph_hash.c b/fs/ceph/ceph_hash.c index 1c44e43fe89e..bd570015d147 100644 --- a/fs/ceph/ceph_hash.c +++ b/fs/ceph/ceph_hash.c @@ -82,14 +82,14 @@ unsigned ceph_str_hash_rjenkins(const char *str, unsigned length) */ unsigned ceph_str_hash_linux(const char *str, unsigned length) { - unsigned long hash = 0; + unsigned long hash = 0; unsigned char c; - while (length--) { + while (length--) { c = *str++; hash = (hash + (c << 4) + (c >> 4)) * 11; } - return hash; + return hash; } @@ -105,7 +105,7 @@ unsigned ceph_str_hash(int type, const char *s, unsigned len) } } -const char *ceph_str_hash_name(int type) +const char *ceph_str_hash_name(int type) { switch (type) { case CEPH_STR_HASH_LINUX: diff --git a/fs/ceph/crush/mapper.c b/fs/ceph/crush/mapper.c index 2523d448445c..9ba54efb6543 100644 --- a/fs/ceph/crush/mapper.c +++ b/fs/ceph/crush/mapper.c @@ -254,7 +254,7 @@ static int crush_bucket_choose(struct crush_bucket *in, int x, int r) x, r); default: BUG_ON(1); - return in->items[0]; + return in->items[0]; } } diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index 9ff2da69d33a..1dd0dc258c50 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -279,7 +279,7 @@ void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc) } /* - * + * */ int ceph_monc_open_session(struct ceph_mon_client *monc) { diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index d600073f1d3f..d639c74e749f 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -1308,8 +1308,8 @@ static void put_osd_con(struct ceph_connection *con) * authentication */ static int get_authorizer(struct ceph_connection *con, - void **buf, int *len, int *proto, - void **reply_buf, int *reply_len, int force_new) + void **buf, int *len, int *proto, + void **reply_buf, int *reply_len, int force_new) { struct ceph_osd *o = con->private; struct ceph_osd_client *osdc = o->o_osdc; -- cgit v1.2.3 From 1d1de9160e0d8aff0d67a21137b62e63ffd6f184 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 2 Dec 2009 11:54:25 -0800 Subject: ceph: hide /.ceph from readdir results We need to skip /.ceph in (cached) readdir results, and exclude "/.ceph" from the cached ENOENT lookup check. 
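The two sites use slightly different tests: cached readdir skips any dentry whose inode is the reserved CEPH_INO_CEPH, while the lookup-side cached-ENOENT shortcut is disabled for a ".ceph" name directly under the root, via a small helper (sketched from the patch below):

    static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
    {
            /* the hidden .ceph directory only ever lives in the root */
            return ceph_ino(inode) == CEPH_INO_ROOT &&
                   strncmp(dentry->d_name.name, ".ceph", 5) == 0;
    }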
Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.h | 1 + fs/ceph/dir.c | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index 4e5f49c738d8..699196a10c66 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -44,6 +44,7 @@ #define CEPH_INO_ROOT 1 +#define CEPH_INO_CEPH 2 /* hidden .ceph dir */ /* arbitrary limit on max # of monitors (cluster of 3 is typical) */ #define CEPH_MAX_MON 31 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 32ef54367224..89ce3ba4a614 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -132,6 +132,7 @@ more: } if (!d_unhashed(dentry) && dentry->d_inode && ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && + ceph_ino(dentry->d_inode) != CEPH_INO_CEPH && filp->f_pos <= di->offset) break; dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry, @@ -512,6 +513,12 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, return dentry; } +static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry) +{ + return ceph_ino(inode) == CEPH_INO_ROOT && + strncmp(dentry->d_name.name, ".ceph", 5) == 0; +} + /* * Look up a single dir entry. If there is a lookup intent, inform * the MDS so that it gets our 'caps wanted' value in a single op. @@ -554,6 +561,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, if (strncmp(dentry->d_name.name, client->mount_args->snapdir_name, dentry->d_name.len) && + !is_root_ceph_dentry(dir, dentry) && (ci->i_ceph_flags & CEPH_I_COMPLETE) && (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) { di->offset = ci->i_max_offset++; -- cgit v1.2.3 From 33d4909ccc094b8262667bccdd52e01458bee0df Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 2 Dec 2009 14:42:39 -0800 Subject: ceph: allow preferred osd to be get/set via layout ioctl There is certainly no reason not to report this. The only real downside to allowing the user to set it is that you don't get default values by zeroing the layout struct (the default is -1). 
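For reference, a minimal userspace sketch that reads a file's layout, including the newly exposed preferred_osd. This assumes a userspace copy of the struct ceph_ioctl_layout and CEPH_IOC_GET_LAYOUT definitions from fs/ceph/ioctl.h (called "ceph_ioctl.h" here) and a file that lives on a mounted ceph filesystem:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "ceph_ioctl.h"   /* hypothetical userspace copy of fs/ceph/ioctl.h */

    int main(int argc, char **argv)
    {
            struct ceph_ioctl_layout l;
            int fd;

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <file-on-ceph>\n", argv[0]);
                    return 1;
            }
            fd = open(argv[1], O_RDONLY);
            if (fd < 0 || ioctl(fd, CEPH_IOC_GET_LAYOUT, &l) < 0) {
                    perror(argv[1]);
                    return 1;
            }
            printf("stripe_unit %llu stripe_count %llu object_size %llu "
                   "pool %llu preferred_osd %lld\n",
                   (unsigned long long)l.stripe_unit,
                   (unsigned long long)l.stripe_count,
                   (unsigned long long)l.object_size,
                   (unsigned long long)l.data_pool,
                   (long long)l.preferred_osd);
            close(fd);
            return 0;
    }

The set path mirrors this with the set variant of the ioctl; note from the
commit message that a zeroed struct is not a set of defaults, since the default
preferred_osd is -1.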
Signed-off-by: Sage Weil --- fs/ceph/ioctl.c | 5 ++++- fs/ceph/ioctl.h | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c index 4c33e19fc241..8a5bcae62846 100644 --- a/fs/ceph/ioctl.c +++ b/fs/ceph/ioctl.c @@ -24,6 +24,8 @@ static long ceph_ioctl_get_layout(struct file *file, void __user *arg) l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout); l.object_size = ceph_file_layout_object_size(ci->i_layout); l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool); + l.preferred_osd = + (s32)le32_to_cpu(ci->i_layout.fl_pg_preferred); if (copy_to_user(arg, &l, sizeof(l))) return -EFAULT; } @@ -79,7 +81,8 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg) req->r_args.setlayout.layout.fl_object_size = cpu_to_le32(l.object_size); req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool); - req->r_args.setlayout.layout.fl_pg_preferred = cpu_to_le32((s32)-1); + req->r_args.setlayout.layout.fl_pg_preferred = + cpu_to_le32(l.preferred_osd); err = ceph_mdsc_do_request(mdsc, parent_inode, req); ceph_mdsc_put_request(req); diff --git a/fs/ceph/ioctl.h b/fs/ceph/ioctl.h index 3c511dab3730..25e4f1a9d059 100644 --- a/fs/ceph/ioctl.h +++ b/fs/ceph/ioctl.h @@ -10,6 +10,7 @@ struct ceph_ioctl_layout { __u64 stripe_unit, stripe_count, object_size; __u64 data_pool; + __s64 preferred_osd; }; #define CEPH_IOC_GET_LAYOUT _IOR(CEPH_IOCTL_MAGIC, 1, \ -- cgit v1.2.3 From fb99f8810965b7e5a00e9754bd7bf556b00ba0c2 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 3 Dec 2009 15:04:08 -0800 Subject: ceph: update MAINTAINERS entry with correct git URL Signed-off-by: Sage Weil --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 9b680ff8c5c0..6fc10aaf7ca2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1298,7 +1298,7 @@ CEPH DISTRIBUTED FILE SYSTEM CLIENT M: Sage Weil L: ceph-devel@lists.sourceforge.net W: http://ceph.newdream.net/ -T: git git://ceph.newdream.net/linux-ceph-client.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git S: Supported F: Documentation/filesystems/ceph.txt F: fs/ceph -- cgit v1.2.3 From 2f2ffd35822688a3650e503197b8724f47312748 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 4 Dec 2009 10:27:17 -0800 Subject: ceph: mark v0.18 release Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index 699196a10c66..e2fd0247827e 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -19,7 +19,7 @@ * Ceph release version */ #define CEPH_VERSION_MAJOR 0 -#define CEPH_VERSION_MINOR 17 +#define CEPH_VERSION_MINOR 18 #define CEPH_VERSION_PATCH 0 #define _CEPH_STRINGIFY(x) #x -- cgit v1.2.3 From dd26d857a7bf1b5b734a23180c19eac3e46db944 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Sat, 5 Dec 2009 10:13:33 -0800 Subject: ceph: use kref for ceph_buffer Signed-off-by: Sage Weil --- fs/ceph/buffer.c | 14 +++++++++++++- fs/ceph/buffer.h | 18 +++++++----------- 2 files changed, 20 insertions(+), 12 deletions(-) diff --git a/fs/ceph/buffer.c b/fs/ceph/buffer.c index cf9aaccef22b..847c5da9a0db 100644 --- a/fs/ceph/buffer.c +++ b/fs/ceph/buffer.c @@ -9,13 +9,25 @@ struct ceph_buffer *ceph_buffer_new(gfp_t gfp) b = kmalloc(sizeof(*b), gfp); if (!b) return NULL; - atomic_set(&b->nref, 1); + kref_init(&b->kref); b->vec.iov_base = NULL; b->vec.iov_len = 0; b->alloc_len = 0; return b; } +void ceph_buffer_release(struct kref *kref) +{ + struct ceph_buffer 
*b = container_of(kref, struct ceph_buffer, kref); + if (b->vec.iov_base) { + if (b->is_vmalloc) + vfree(b->vec.iov_base); + else + kfree(b->vec.iov_base); + } + kfree(b); +} + int ceph_buffer_alloc(struct ceph_buffer *b, int len, gfp_t gfp) { b->vec.iov_base = kmalloc(len, gfp | __GFP_NOWARN); diff --git a/fs/ceph/buffer.h b/fs/ceph/buffer.h index 16b1930acc45..3f541a13094f 100644 --- a/fs/ceph/buffer.h +++ b/fs/ceph/buffer.h @@ -1,6 +1,7 @@ #ifndef __FS_CEPH_BUFFER_H #define __FS_CEPH_BUFFER_H +#include #include #include #include @@ -13,7 +14,7 @@ * sizes. */ struct ceph_buffer { - atomic_t nref; + struct kref kref; struct kvec vec; size_t alloc_len; bool is_vmalloc; @@ -24,21 +25,16 @@ int ceph_buffer_alloc(struct ceph_buffer *b, int len, gfp_t gfp); static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b) { - atomic_inc(&b->nref); + kref_get(&b->kref); return b; } +void ceph_buffer_release(struct kref *kref); + static inline void ceph_buffer_put(struct ceph_buffer *b) { - if (b && atomic_dec_and_test(&b->nref)) { - if (b->vec.iov_base) { - if (b->is_vmalloc) - vfree(b->vec.iov_base); - else - kfree(b->vec.iov_base); - } - kfree(b); - } + if (b) + kref_put(&b->kref, ceph_buffer_release); } static inline struct ceph_buffer *ceph_buffer_new_alloc(int len, gfp_t gfp) -- cgit v1.2.3 From b6c1d5b81ea0841ae9d3ce2cda319ab986b081cf Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 7 Dec 2009 12:17:17 -0800 Subject: ceph: simplify ceph_buffer interface We never allocate the ceph_buffer and buffer separtely, so use a single constructor. Disallow put on NULL buffer; make the caller check. Signed-off-by: Sage Weil --- fs/ceph/buffer.c | 23 +++++++++++++++++++---- fs/ceph/buffer.h | 20 +++----------------- fs/ceph/inode.c | 11 +++++++---- fs/ceph/messenger.c | 2 +- fs/ceph/xattr.c | 8 +++++--- 5 files changed, 35 insertions(+), 29 deletions(-) diff --git a/fs/ceph/buffer.c b/fs/ceph/buffer.c index 847c5da9a0db..2576bd452cb8 100644 --- a/fs/ceph/buffer.c +++ b/fs/ceph/buffer.c @@ -2,23 +2,38 @@ #include "ceph_debug.h" #include "buffer.h" -struct ceph_buffer *ceph_buffer_new(gfp_t gfp) +struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp) { struct ceph_buffer *b; b = kmalloc(sizeof(*b), gfp); if (!b) return NULL; + + b->vec.iov_base = kmalloc(len, gfp | __GFP_NOWARN); + if (b->vec.iov_base) { + b->is_vmalloc = false; + } else { + b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL); + if (!b->vec.iov_base) { + kfree(b); + return NULL; + } + b->is_vmalloc = true; + } + kref_init(&b->kref); - b->vec.iov_base = NULL; - b->vec.iov_len = 0; - b->alloc_len = 0; + b->alloc_len = len; + b->vec.iov_len = len; + dout("buffer_new %p\n", b); return b; } void ceph_buffer_release(struct kref *kref) { struct ceph_buffer *b = container_of(kref, struct ceph_buffer, kref); + + dout("buffer_release %p\n", b); if (b->vec.iov_base) { if (b->is_vmalloc) vfree(b->vec.iov_base); diff --git a/fs/ceph/buffer.h b/fs/ceph/buffer.h index 3f541a13094f..47b9514c5bbd 100644 --- a/fs/ceph/buffer.h +++ b/fs/ceph/buffer.h @@ -20,8 +20,8 @@ struct ceph_buffer { bool is_vmalloc; }; -struct ceph_buffer *ceph_buffer_new(gfp_t gfp); -int ceph_buffer_alloc(struct ceph_buffer *b, int len, gfp_t gfp); +extern struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp); +extern void ceph_buffer_release(struct kref *kref); static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b) { @@ -29,23 +29,9 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b) return b; } -void 
ceph_buffer_release(struct kref *kref); - static inline void ceph_buffer_put(struct ceph_buffer *b) { - if (b) - kref_put(&b->kref, ceph_buffer_release); -} - -static inline struct ceph_buffer *ceph_buffer_new_alloc(int len, gfp_t gfp) -{ - struct ceph_buffer *b = ceph_buffer_new(gfp); - - if (b && ceph_buffer_alloc(b, len, gfp) < 0) { - ceph_buffer_put(b); - b = NULL; - } - return b; + kref_put(&b->kref, ceph_buffer_release); } #endif diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 074ee42bd344..db684686f48a 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -383,8 +383,10 @@ void ceph_destroy_inode(struct inode *inode) } __ceph_destroy_xattrs(ci); - ceph_buffer_put(ci->i_xattrs.blob); - ceph_buffer_put(ci->i_xattrs.prealloc_blob); + if (ci->i_xattrs.blob) + ceph_buffer_put(ci->i_xattrs.blob); + if (ci->i_xattrs.prealloc_blob) + ceph_buffer_put(ci->i_xattrs.prealloc_blob); kmem_cache_free(ceph_inode_cachep, ci); } @@ -526,7 +528,7 @@ static int fill_inode(struct inode *inode, * bytes are the xattr count). */ if (iinfo->xattr_len > 4) { - xattr_blob = ceph_buffer_new_alloc(iinfo->xattr_len, GFP_NOFS); + xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS); if (!xattr_blob) pr_err("fill_inode ENOMEM xattr blob %d bytes\n", iinfo->xattr_len); @@ -715,7 +717,8 @@ no_change: err = 0; out: - ceph_buffer_put(xattr_blob); + if (xattr_blob) + ceph_buffer_put(xattr_blob); return err; } diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 45cec31fdf5e..bf762107b3d5 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -2047,7 +2047,7 @@ int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) BUG_ON(!middle_len); BUG_ON(msg->middle); - msg->middle = ceph_buffer_new_alloc(middle_len, GFP_NOFS); + msg->middle = ceph_buffer_new(middle_len, GFP_NOFS); if (!msg->middle) return -ENOMEM; return 0; diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 04769a3ab832..37d6ce645691 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -482,7 +482,8 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci) ci->i_xattrs.prealloc_blob->vec.iov_len = dest - ci->i_xattrs.prealloc_blob->vec.iov_base; - ceph_buffer_put(ci->i_xattrs.blob); + if (ci->i_xattrs.blob) + ceph_buffer_put(ci->i_xattrs.blob); ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob; ci->i_xattrs.prealloc_blob = NULL; ci->i_xattrs.dirty = false; @@ -745,11 +746,12 @@ retry: spin_unlock(&inode->i_lock); dout(" preaallocating new blob size=%d\n", required_blob_size); - blob = ceph_buffer_new_alloc(required_blob_size, GFP_NOFS); + blob = ceph_buffer_new(required_blob_size, GFP_NOFS); if (!blob) goto out; spin_lock(&inode->i_lock); - ceph_buffer_put(ci->i_xattrs.prealloc_blob); + if (ci->i_xattrs.prealloc_blob) + ceph_buffer_put(ci->i_xattrs.prealloc_blob); ci->i_xattrs.prealloc_blob = blob; goto retry; } -- cgit v1.2.3 From 153c8e6bf7ffee561e046e60b26ef6486c6fc9f2 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 7 Dec 2009 12:31:09 -0800 Subject: ceph: use kref for struct ceph_mds_request Signed-off-by: Sage Weil --- fs/ceph/mds_client.c | 69 ++++++++++++++++++++++++++-------------------------- fs/ceph/mds_client.h | 11 ++++++--- 2 files changed, 42 insertions(+), 38 deletions(-) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 7da836909abb..739093f281d0 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -400,41 +400,40 @@ static void put_request_session(struct ceph_mds_request *req) } } -void ceph_mdsc_put_request(struct ceph_mds_request *req) -{ - dout("mdsc put_request %p %d -> 
%d\n", req, - atomic_read(&req->r_ref), atomic_read(&req->r_ref)-1); - if (atomic_dec_and_test(&req->r_ref)) { - if (req->r_request) - ceph_msg_put(req->r_request); - if (req->r_reply) { - ceph_msg_put(req->r_reply); - destroy_reply_info(&req->r_reply_info); - } - if (req->r_inode) { - ceph_put_cap_refs(ceph_inode(req->r_inode), - CEPH_CAP_PIN); - iput(req->r_inode); - } - if (req->r_locked_dir) - ceph_put_cap_refs(ceph_inode(req->r_locked_dir), - CEPH_CAP_PIN); - if (req->r_target_inode) - iput(req->r_target_inode); - if (req->r_dentry) - dput(req->r_dentry); - if (req->r_old_dentry) { - ceph_put_cap_refs( - ceph_inode(req->r_old_dentry->d_parent->d_inode), - CEPH_CAP_PIN); - dput(req->r_old_dentry); - } - kfree(req->r_path1); - kfree(req->r_path2); - put_request_session(req); - ceph_unreserve_caps(&req->r_caps_reservation); - kfree(req); +void ceph_mdsc_release_request(struct kref *kref) +{ + struct ceph_mds_request *req = container_of(kref, + struct ceph_mds_request, + r_kref); + if (req->r_request) + ceph_msg_put(req->r_request); + if (req->r_reply) { + ceph_msg_put(req->r_reply); + destroy_reply_info(&req->r_reply_info); + } + if (req->r_inode) { + ceph_put_cap_refs(ceph_inode(req->r_inode), + CEPH_CAP_PIN); + iput(req->r_inode); + } + if (req->r_locked_dir) + ceph_put_cap_refs(ceph_inode(req->r_locked_dir), + CEPH_CAP_PIN); + if (req->r_target_inode) + iput(req->r_target_inode); + if (req->r_dentry) + dput(req->r_dentry); + if (req->r_old_dentry) { + ceph_put_cap_refs( + ceph_inode(req->r_old_dentry->d_parent->d_inode), + CEPH_CAP_PIN); + dput(req->r_old_dentry); } + kfree(req->r_path1); + kfree(req->r_path2); + put_request_session(req); + ceph_unreserve_caps(&req->r_caps_reservation); + kfree(req); } /* @@ -1097,7 +1096,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) req->r_resend_mds = -1; INIT_LIST_HEAD(&req->r_unsafe_dir_item); req->r_fmode = -1; - atomic_set(&req->r_ref, 1); /* one for request_tree, one for caller */ + kref_init(&req->r_kref); INIT_LIST_HEAD(&req->r_wait); init_completion(&req->r_completion); init_completion(&req->r_safe_completion); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 9faa1b2f79a7..41af5ca316e6 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -2,6 +2,7 @@ #define _FS_CEPH_MDS_CLIENT_H #include +#include #include #include #include @@ -203,7 +204,7 @@ struct ceph_mds_request { int r_num_stale; int r_resend_mds; /* mds to resend to next, if any*/ - atomic_t r_ref; + struct kref r_kref; struct list_head r_wait; struct completion r_completion; struct completion r_safe_completion; @@ -306,9 +307,13 @@ extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req); static inline void ceph_mdsc_get_request(struct ceph_mds_request *req) { - atomic_inc(&req->r_ref); + kref_get(&req->r_kref); +} +extern void ceph_mdsc_release_request(struct kref *kref); +static inline void ceph_mdsc_put_request(struct ceph_mds_request *req) +{ + kref_put(&req->r_kref, ceph_mdsc_release_request); } -extern void ceph_mdsc_put_request(struct ceph_mds_request *req); extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc); -- cgit v1.2.3 From 415e49a9c4faf1a1480b1497da2037608e5aa2c5 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 7 Dec 2009 13:37:03 -0800 Subject: ceph: use kref for ceph_osd_request Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 37 ++++++++++++++++++------------------- fs/ceph/osd_client.h | 11 ++++++++--- 2 files changed, 26 insertions(+), 22 deletions(-) diff 
--git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index d639c74e749f..67ef8ab06af4 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -77,25 +77,24 @@ static void calc_layout(struct ceph_osd_client *osdc, /* * requests */ -void ceph_osdc_put_request(struct ceph_osd_request *req) +void ceph_osdc_release_request(struct kref *kref) { - dout("osdc put_request %p %d -> %d\n", req, atomic_read(&req->r_ref), - atomic_read(&req->r_ref)-1); - BUG_ON(atomic_read(&req->r_ref) <= 0); - if (atomic_dec_and_test(&req->r_ref)) { - if (req->r_request) - ceph_msg_put(req->r_request); - if (req->r_reply) - ceph_msg_put(req->r_reply); - if (req->r_own_pages) - ceph_release_page_vector(req->r_pages, - req->r_num_pages); - ceph_put_snap_context(req->r_snapc); - if (req->r_mempool) - mempool_free(req, req->r_osdc->req_mempool); - else - kfree(req); - } + struct ceph_osd_request *req = container_of(kref, + struct ceph_osd_request, + r_kref); + + if (req->r_request) + ceph_msg_put(req->r_request); + if (req->r_reply) + ceph_msg_put(req->r_reply); + if (req->r_own_pages) + ceph_release_page_vector(req->r_pages, + req->r_num_pages); + ceph_put_snap_context(req->r_snapc); + if (req->r_mempool) + mempool_free(req, req->r_osdc->req_mempool); + else + kfree(req); } /* @@ -149,7 +148,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, req->r_osdc = osdc; req->r_mempool = use_mempool; - atomic_set(&req->r_ref, 1); + kref_init(&req->r_kref); init_completion(&req->r_completion); init_completion(&req->r_safe_completion); INIT_LIST_HEAD(&req->r_unsafe_item); diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h index 3d4ae6595aaa..20ee61847416 100644 --- a/fs/ceph/osd_client.h +++ b/fs/ceph/osd_client.h @@ -2,6 +2,7 @@ #define _FS_CEPH_OSD_CLIENT_H #include +#include #include #include @@ -49,7 +50,7 @@ struct ceph_osd_request { int r_prepared_pages, r_got_reply; struct ceph_osd_client *r_osdc; - atomic_t r_ref; + struct kref r_kref; bool r_mempool; struct completion r_completion, r_safe_completion; ceph_osdc_callback_t r_callback, r_safe_callback; @@ -118,9 +119,13 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, static inline void ceph_osdc_get_request(struct ceph_osd_request *req) { - atomic_inc(&req->r_ref); + kref_get(&req->r_kref); +} +extern void ceph_osdc_release_request(struct kref *kref); +static inline void ceph_osdc_put_request(struct ceph_osd_request *req) +{ + kref_put(&req->r_kref, ceph_osdc_release_request); } -extern void ceph_osdc_put_request(struct ceph_osd_request *req); extern int ceph_osdc_start_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req, -- cgit v1.2.3 From c2e552e76e2c6907ca50cd9a4b747a2e2e8c615e Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 7 Dec 2009 15:55:05 -0800 Subject: ceph: use kref for ceph_msg Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 47 ++++++++++++++++++----------------------------- fs/ceph/messenger.h | 13 ++++++++----- fs/ceph/msgpool.c | 2 +- 3 files changed, 27 insertions(+), 35 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index bf762107b3d5..b0571b01b19f 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -1958,7 +1958,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, m = kmalloc(sizeof(*m), GFP_NOFS); if (m == NULL) goto out; - atomic_set(&m->nref, 1); + kref_init(&m->kref); INIT_LIST_HEAD(&m->list_head); m->hdr.type = cpu_to_le16(type); @@ -2070,34 +2070,23 @@ void ceph_msg_kfree(struct ceph_msg *m) /* * Drop a msg ref. 
Destroy as needed. */ -void ceph_msg_put(struct ceph_msg *m) -{ - dout("ceph_msg_put %p %d -> %d\n", m, atomic_read(&m->nref), - atomic_read(&m->nref)-1); - if (atomic_read(&m->nref) <= 0) { - pr_err("bad ceph_msg_put on %p %llu %d=%s %d+%d\n", - m, le64_to_cpu(m->hdr.seq), - le16_to_cpu(m->hdr.type), - ceph_msg_type_name(le16_to_cpu(m->hdr.type)), - le32_to_cpu(m->hdr.front_len), - le32_to_cpu(m->hdr.data_len)); - WARN_ON(1); - } - if (atomic_dec_and_test(&m->nref)) { - dout("ceph_msg_put last one on %p\n", m); - WARN_ON(!list_empty(&m->list_head)); - - /* drop middle, data, if any */ - if (m->middle) { - ceph_buffer_put(m->middle); - m->middle = NULL; - } - m->nr_pages = 0; - m->pages = NULL; +void ceph_msg_last_put(struct kref *kref) +{ + struct ceph_msg *m = container_of(kref, struct ceph_msg, kref); - if (m->pool) - ceph_msgpool_put(m->pool, m); - else - ceph_msg_kfree(m); + dout("ceph_msg_put last one on %p\n", m); + WARN_ON(!list_empty(&m->list_head)); + + /* drop middle, data, if any */ + if (m->middle) { + ceph_buffer_put(m->middle); + m->middle = NULL; } + m->nr_pages = 0; + m->pages = NULL; + + if (m->pool) + ceph_msgpool_put(m->pool, m); + else + ceph_msg_kfree(m); } diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index f9c9f6487302..981b7c08ad82 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -1,6 +1,7 @@ #ifndef __FS_CEPH_MESSENGER_H #define __FS_CEPH_MESSENGER_H +#include #include #include #include @@ -85,7 +86,7 @@ struct ceph_msg { struct page **pages; /* data payload. NOT OWNER. */ unsigned nr_pages; /* size of page array */ struct list_head list_head; - atomic_t nref; + struct kref kref; bool front_is_vmalloc; bool more_to_follow; int front_max; @@ -243,11 +244,13 @@ extern int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg); static inline struct ceph_msg *ceph_msg_get(struct ceph_msg *msg) { - dout("ceph_msg_get %p %d -> %d\n", msg, atomic_read(&msg->nref), - atomic_read(&msg->nref)+1); - atomic_inc(&msg->nref); + kref_get(&msg->kref); return msg; } -extern void ceph_msg_put(struct ceph_msg *msg); +extern void ceph_msg_last_put(struct kref *kref); +static inline void ceph_msg_put(struct ceph_msg *msg) +{ + kref_put(&msg->kref, ceph_msg_last_put); +} #endif diff --git a/fs/ceph/msgpool.c b/fs/ceph/msgpool.c index 7599b3382076..ad5482c0267b 100644 --- a/fs/ceph/msgpool.c +++ b/fs/ceph/msgpool.c @@ -165,7 +165,7 @@ void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg) { spin_lock(&pool->lock); if (pool->num < pool->min) { - ceph_msg_get(msg); /* retake a single ref */ + kref_set(&msg->kref, 1); /* retake a single ref */ list_add(&msg->list_head, &pool->msgs); pool->num++; dout("msgpool_put %p reclaim %p, now %d/%d\n", pool, msg, -- cgit v1.2.3 From 767ea5c33a360ce88da24e296e802dace5821799 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 9 Dec 2009 12:34:01 -0800 Subject: ceph: do not feed bad device ids to crush Do not feed bad (large) device ids to CRUSH. 
Signed-off-by: Sage Weil --- fs/ceph/osdmap.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 8c994c714781..be5318aa7714 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -868,6 +868,11 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, ps = le16_to_cpu(pgid.ps); preferred = (s16)le16_to_cpu(pgid.preferred); + /* don't forcefeed bad device ids to crush */ + if (preferred >= osdmap->max_osd || + preferred >= osdmap->crush->max_devices) + preferred = -1; + if (poolid >= osdmap->num_pools) return NULL; pool = &osdmap->pg_pool[poolid]; -- cgit v1.2.3 From d4a780ce8821a37dd135f15b6150a5bfc5604f29 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 11 Dec 2009 08:55:23 -0800 Subject: ceph: fix leak of monc mutex Fix leak of monc mutex on ENOMEM or bad fsid when receiving new mon map. Audited all other users. Signed-off-by: Sage Weil --- fs/ceph/mon_client.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index 1dd0dc258c50..a76da5e6dbdd 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -320,17 +320,18 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc, if (IS_ERR(monmap)) { pr_err("problem decoding monmap, %d\n", (int)PTR_ERR(monmap)); - return; + goto out; } if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) { kfree(monmap); - return; + goto out; } client->monc.monmap = monmap; kfree(old); +out: mutex_unlock(&monc->mutex); wake_up(&client->mount_wq); } -- cgit v1.2.3 From c86a2930ccbd90d77c54d04b5c2bbec95b989e40 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 14 Dec 2009 14:04:30 -0800 Subject: ceph: carry explicit msg reference for currently sending message Carry a ceph_msg reference for connection->out_msg. This will allow us to make out_sent optional. Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 22 ++++++++++++++++++---- fs/ceph/messenger.h | 1 + 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index b0571b01b19f..96fd556a7804 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -322,7 +322,10 @@ static void reset_connection(struct ceph_connection *con) con->connect_seq = 0; con->out_seq = 0; - con->out_msg = NULL; + if (con->out_msg) { + ceph_msg_put(con->out_msg); + con->out_msg = NULL; + } con->in_seq = 0; mutex_unlock(&con->out_mutex); } @@ -423,7 +426,7 @@ static void prepare_write_message_footer(struct ceph_connection *con, int v) con->out_kvec_bytes += sizeof(m->footer); con->out_kvec_left++; con->out_more = m->more_to_follow; - con->out_msg = NULL; /* we're done with this one */ + con->out_msg_done = true; } /* @@ -436,6 +439,7 @@ static void prepare_write_message(struct ceph_connection *con) con->out_kvec_bytes = 0; con->out_kvec_is_msg = true; + con->out_msg_done = false; /* Sneak an ack in there first? If we can get it into the same * TCP packet that's a good thing. */ @@ -452,8 +456,9 @@ static void prepare_write_message(struct ceph_connection *con) /* move message to sending/sent list */ m = list_first_entry(&con->out_queue, struct ceph_msg, list_head); + con->out_msg = m; + ceph_msg_get(m); list_move_tail(&m->list_head, &con->out_sent); - con->out_msg = m; /* we don't bother taking a reference here. */ m->hdr.seq = cpu_to_le64(++con->out_seq); @@ -1521,6 +1526,12 @@ more_kvec: /* msg pages? 
*/ if (con->out_msg) { + if (con->out_msg_done) { + ceph_msg_put(con->out_msg); + con->out_msg = NULL; /* we're done with this one */ + goto do_next; + } + ret = write_partial_msg_pages(con); if (ret == 1) goto more_kvec; /* we need to send the footer, too! */ @@ -1533,6 +1544,7 @@ more_kvec: } } +do_next: if (!test_bit(CONNECTING, &con->state)) { /* is anything else pending? */ if (!list_empty(&con->out_queue)) { @@ -1923,8 +1935,10 @@ void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg) list_del_init(&msg->list_head); ceph_msg_put(msg); msg->hdr.seq = 0; - if (con->out_msg == msg) + if (con->out_msg == msg) { + ceph_msg_put(con->out_msg); con->out_msg = NULL; + } if (con->out_kvec_is_msg) { con->out_skip = con->out_kvec_bytes; con->out_kvec_is_msg = false; diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index 981b7c08ad82..eff5cb5197fc 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -182,6 +182,7 @@ struct ceph_connection { /* message out temps */ struct ceph_msg *out_msg; /* sending message (== tail of out_sent) */ + bool out_msg_done; struct ceph_msg_pos out_msg_pos; struct kvec out_kvec[8], /* sending header/footer data */ -- cgit v1.2.3 From 5e095e8b40b0402ad3bcadc5b8d84c38b26c30b2 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 14 Dec 2009 14:30:34 -0800 Subject: ceph: plug msg leak in con_fault Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 96fd556a7804..98519bd33f04 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -1434,8 +1434,9 @@ no_data: */ static void process_message(struct ceph_connection *con) { - struct ceph_msg *msg = con->in_msg; + struct ceph_msg *msg; + msg = con->in_msg; con->in_msg = NULL; /* if first message, set peer_name */ @@ -1810,7 +1811,11 @@ static void ceph_fault(struct ceph_connection *con) clear_bit(BUSY, &con->state); /* to avoid an improbable race */ con_close_socket(con); - con->in_msg = NULL; + + if (con->in_msg) { + ceph_msg_put(con->in_msg); + con->in_msg = NULL; + } /* If there are no messages in the queue, place the connection * in a STANDBY state (i.e., don't try to reconnect just yet). */ -- cgit v1.2.3 From 92ac41d0a4ab26fb68d3f841332e5d1f15d79123 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 14 Dec 2009 14:56:56 -0800 Subject: ceph: detect lossy state of connection The server indicates whether a connection is lossy; set our LOSSYTX bit appropriately. Do not set lossy bit on outgoing connections. 
Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 98519bd33f04..986d8fb9c574 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -625,8 +625,6 @@ static void prepare_write_connect(struct ceph_messenger *msgr, con->out_connect.global_seq = cpu_to_le32(global_seq); con->out_connect.protocol_version = cpu_to_le32(proto); con->out_connect.flags = 0; - if (test_bit(LOSSYTX, &con->state)) - con->out_connect.flags = CEPH_MSG_CONNECT_LOSSY; if (!after_banner) { con->out_kvec_left = 0; @@ -1168,6 +1166,10 @@ static int process_connect(struct ceph_connection *con) con->connect_seq); WARN_ON(con->connect_seq != le32_to_cpu(con->in_reply.connect_seq)); + + if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY) + set_bit(LOSSYTX, &con->state); + prepare_read_tag(con); break; -- cgit v1.2.3 From b3d1dbbdd5670d8a9fb01f7dfb1cac522ff6795a Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 14 Dec 2009 14:58:11 -0800 Subject: ceph: don't save sent messages on lossy connections For lossy connections we drop all state on socket errors, so there is no reason to keep sent ceph_msg's around. Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 986d8fb9c574..d5eef76a253c 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -453,12 +453,16 @@ static void prepare_write_message(struct ceph_connection *con) con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack); } - /* move message to sending/sent list */ m = list_first_entry(&con->out_queue, struct ceph_msg, list_head); con->out_msg = m; - ceph_msg_get(m); - list_move_tail(&m->list_head, &con->out_sent); + if (test_bit(LOSSYTX, &con->state)) { + /* put message on sent list */ + ceph_msg_get(m); + list_move_tail(&m->list_head, &con->out_sent); + } else { + list_del_init(&m->list_head); + } m->hdr.seq = cpu_to_le64(++con->out_seq); -- cgit v1.2.3 From 93c20d98c29ccefa039c3843ccc37122caaf3d31 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Tue, 15 Dec 2009 09:50:36 -0800 Subject: ceph: fix msgpool reservation leak Signed-off-by: Yehuda Sadeh --- fs/ceph/osd_client.c | 5 ++++- fs/ceph/osd_client.h | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 67ef8ab06af4..63482ef3de01 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -145,6 +145,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, ceph_osdc_put_request(req); return ERR_PTR(-ENOMEM); } + req->r_num_prealloc_reply = num_reply; req->r_osdc = osdc; req->r_mempool = use_mempool; @@ -165,7 +166,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, else msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, 0, 0, NULL); if (IS_ERR(msg)) { - ceph_msgpool_resv(&osdc->msgpool_op_reply, num_reply); + ceph_msgpool_resv(&osdc->msgpool_op_reply, -num_reply); ceph_osdc_put_request(req); return ERR_PTR(PTR_ERR(msg)); } @@ -465,6 +466,8 @@ static void __unregister_request(struct ceph_osd_client *osdc, rb_erase(&req->r_node, &osdc->requests); osdc->num_requests--; + ceph_msgpool_resv(&osdc->msgpool_op_reply, -req->r_num_prealloc_reply); + if (req->r_osd) { /* make sure the original request isn't in flight. 
*/ ceph_con_revoke(&req->r_osd->o_con, req->r_request); diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h index 20ee61847416..2e4cfd1e9f10 100644 --- a/fs/ceph/osd_client.h +++ b/fs/ceph/osd_client.h @@ -48,6 +48,7 @@ struct ceph_osd_request { int r_flags; /* any additional flags for the osd */ u32 r_sent; /* >0 if r_request is sending/sent */ int r_prepared_pages, r_got_reply; + int r_num_prealloc_reply; struct ceph_osd_client *r_osdc; struct kref r_kref; -- cgit v1.2.3 From 9ec7cab14e6de732d4e7c355fe67c5810c32c758 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 14 Dec 2009 15:13:47 -0800 Subject: ceph: hex dump corrupt server data to KERN_DEBUG Also, print fsid using standard format, NOT hex dump. Signed-off-by: Sage Weil --- fs/ceph/caps.c | 1 + fs/ceph/mds_client.c | 4 ++++ fs/ceph/mdsmap.c | 4 ++++ fs/ceph/messenger.c | 20 ++++++++++++++++++++ fs/ceph/messenger.h | 2 ++ fs/ceph/mon_client.c | 2 ++ fs/ceph/osd_client.c | 2 ++ fs/ceph/osdmap.c | 3 +++ fs/ceph/snap.c | 1 + fs/ceph/super.c | 9 ++------- 10 files changed, 41 insertions(+), 7 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 9b9ce143ac1f..dfb509f53542 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2681,6 +2681,7 @@ done: bad: pr_err("ceph_handle_caps: corrupt message\n"); + ceph_msg_dump(msg); return; } diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 739093f281d0..29a93fe35f85 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1650,6 +1650,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) return; if (msg->front.iov_len < sizeof(*head)) { pr_err("mdsc_handle_reply got corrupt (short) reply\n"); + ceph_msg_dump(msg); return; } @@ -1740,6 +1741,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) mutex_lock(&session->s_mutex); if (err < 0) { pr_err("mdsc_handle_reply got corrupt reply mds%d\n", mds); + ceph_msg_dump(msg); goto out_err; } @@ -1929,6 +1931,7 @@ static void handle_session(struct ceph_mds_session *session, bad: pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds, (int)msg->front.iov_len); + ceph_msg_dump(msg); return; } @@ -2394,6 +2397,7 @@ out: bad: pr_err("corrupt lease message\n"); + ceph_msg_dump(msg); } void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c index cad8d25861e5..c4c498e6dfef 100644 --- a/fs/ceph/mdsmap.c +++ b/fs/ceph/mdsmap.c @@ -49,6 +49,7 @@ int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m) struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) { struct ceph_mdsmap *m; + const void *start = *p; int i, j, n; int err = -EINVAL; u16 version; @@ -154,6 +155,9 @@ badmem: err = -ENOMEM; bad: pr_err("corrupt mdsmap\n"); + print_hex_dump(KERN_DEBUG, "mdsmap: ", + DUMP_PREFIX_OFFSET, 16, 1, + start, end - start, true); ceph_mdsmap_destroy(m); return ERR_PTR(-EINVAL); } diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index d5eef76a253c..b10f88c56706 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -2115,3 +2115,23 @@ void ceph_msg_last_put(struct kref *kref) else ceph_msg_kfree(m); } + +void ceph_msg_dump(struct ceph_msg *msg) +{ + pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg, + msg->front_max, msg->nr_pages); + print_hex_dump(KERN_DEBUG, "header: ", + DUMP_PREFIX_OFFSET, 16, 1, + &msg->hdr, sizeof(msg->hdr), true); + print_hex_dump(KERN_DEBUG, " front: ", + DUMP_PREFIX_OFFSET, 16, 1, + msg->front.iov_base, msg->front.iov_len, true); + if (msg->middle) + 
print_hex_dump(KERN_DEBUG, "middle: ", + DUMP_PREFIX_OFFSET, 16, 1, + msg->middle->vec.iov_base, + msg->middle->vec.iov_len, true); + print_hex_dump(KERN_DEBUG, "footer: ", + DUMP_PREFIX_OFFSET, 16, 1, + &msg->footer, sizeof(msg->footer), true); +} diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index eff5cb5197fc..e04c214b4f6f 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -254,4 +254,6 @@ static inline void ceph_msg_put(struct ceph_msg *msg) kref_put(&msg->kref, ceph_msg_last_put); } +extern void ceph_msg_dump(struct ceph_msg *msg); + #endif diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index a76da5e6dbdd..775a9c029c51 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -242,6 +242,7 @@ static void handle_subscribe_ack(struct ceph_mon_client *monc, return; bad: pr_err("got corrupt subscribe-ack msg\n"); + ceph_msg_dump(msg); } /* @@ -364,6 +365,7 @@ static void handle_statfs_reply(struct ceph_mon_client *monc, bad: pr_err("corrupt statfs reply, no tid\n"); + ceph_msg_dump(msg); } /* diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 63482ef3de01..4bfe880d53c8 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -773,6 +773,7 @@ bad: pr_err("corrupt osd_op_reply got %d %d expected %d\n", (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len), (int)sizeof(*rhead)); + ceph_msg_dump(msg); } @@ -964,6 +965,7 @@ done: bad: pr_err("osdc handle_map corrupt msg\n"); + ceph_msg_dump(msg); up_write(&osdc->map_sem); return; } diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index be5318aa7714..8c8ffe5ef7d4 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -726,6 +726,9 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bad: pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n", epoch, (int)(*p - start), *p, start, end); + print_hex_dump(KERN_DEBUG, "osdmap: ", + DUMP_PREFIX_OFFSET, 16, 1, + start, end - start, true); if (newcrush) crush_destroy(newcrush); return ERR_PTR(err); diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 52f46a1208f5..dcf18d92130a 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -877,6 +877,7 @@ split_skip_inode: bad: pr_err("corrupt snap message from mds%d\n", mds); + ceph_msg_dump(msg); out: if (locked_rwsem) up_write(&mdsc->snap_rwsem); diff --git a/fs/ceph/super.c b/fs/ceph/super.c index a828943296c5..6d02a166f8ff 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -602,13 +602,8 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid) { if (client->have_fsid) { if (ceph_fsid_compare(&client->fsid, fsid)) { - print_hex_dump(KERN_ERR, "this fsid: ", - DUMP_PREFIX_NONE, 16, 1, - (void *)fsid, 16, 0); - print_hex_dump(KERN_ERR, " old fsid: ", - DUMP_PREFIX_NONE, 16, 1, - (void *)&client->fsid, 16, 0); - pr_err("fsid mismatch\n"); + pr_err("bad fsid, had " FSID_FORMAT " got " FSID_FORMAT, + PR_FSID(&client->fsid), PR_FSID(fsid)); return -1; } } else { -- cgit v1.2.3 From cf3e5c409b5d66ec66207092a3f7e3e2c42c0f3f Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 11 Dec 2009 09:48:05 -0800 Subject: ceph: plug leak of incoming message during connection fault/close If we explicitly close a connection, or there is a socket error, we need to drop any partially received message. 
Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index b10f88c56706..b12604ef1846 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -320,6 +320,11 @@ static void reset_connection(struct ceph_connection *con) ceph_msg_remove_list(&con->out_queue); ceph_msg_remove_list(&con->out_sent); + if (con->in_msg) { + ceph_msg_put(con->in_msg); + con->in_msg = NULL; + } + con->connect_seq = 0; con->out_seq = 0; if (con->out_msg) { @@ -1288,7 +1293,7 @@ static int read_partial_message(struct ceph_connection *con) con->in_msg = con->ops->alloc_msg(con, &con->in_hdr); if (!con->in_msg) { /* skip this message */ - dout("alloc_msg returned NULL, skipping message\n"); + pr_err("alloc_msg returned NULL, skipping message\n"); con->in_base_pos = -front_len - middle_len - data_len - sizeof(m->footer); con->in_tag = CEPH_MSGR_TAG_READY; @@ -1327,7 +1332,7 @@ static int read_partial_message(struct ceph_connection *con) if (con->ops->alloc_middle) ret = con->ops->alloc_middle(con, m); if (ret < 0) { - dout("alloc_middle failed, skipping payload\n"); + pr_err("alloc_middle fail skipping payload\n"); con->in_base_pos = -middle_len - data_len - sizeof(m->footer); ceph_msg_put(con->in_msg); @@ -1498,6 +1503,7 @@ more: set_bit(CONNECTING, &con->state); clear_bit(NEGOTIATING, &con->state); + BUG_ON(con->in_msg); con->in_tag = CEPH_MSGR_TAG_READY; dout("try_write initiating connect on %p new state %lu\n", con, con->state); -- cgit v1.2.3 From e2885f06ce31d82b556be021acfa2eba160f29cc Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 15 Dec 2009 10:27:48 -0800 Subject: ceph: make mds ops interruptible Signed-off-by: Sage Weil --- fs/ceph/mds_client.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 29a93fe35f85..d7cecc3366da 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1597,14 +1597,17 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, if (!req->r_reply) { mutex_unlock(&mdsc->mutex); if (req->r_timeout) { - err = wait_for_completion_timeout(&req->r_completion, - req->r_timeout); - if (err > 0) - err = 0; - else if (err == 0) + err = (long)wait_for_completion_interruptible_timeout( + &req->r_completion, req->r_timeout); + if (err == 0) req->r_reply = ERR_PTR(-EIO); + else if (err < 0) + req->r_reply = ERR_PTR(err); } else { - wait_for_completion(&req->r_completion); + err = wait_for_completion_interruptible( + &req->r_completion); + if (err) + req->r_reply = ERR_PTR(err); } mutex_lock(&mdsc->mutex); } -- cgit v1.2.3 From 06edf046dd68ccbc7cf5f70f957a31702d0e7596 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 15 Dec 2009 14:44:32 -0800 Subject: ceph: include link to bdi in debugfs Signed-off-by: Sage Weil --- fs/ceph/debugfs.c | 7 +++++++ fs/ceph/super.h | 1 + 2 files changed, 8 insertions(+) diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index b90fc3e1ff70..441484ab7e94 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -1,5 +1,6 @@ #include "ceph_debug.h" +#include #include #include #include @@ -24,6 +25,7 @@ * .../monc - mon client state * .../dentry_lru - dump contents of dentry lru * .../caps - expose cap (reservation) stats + * .../bdi - symlink to ../../bdi/something */ static struct dentry *ceph_debugfs_dir; @@ -407,6 +409,10 @@ int ceph_debugfs_client_init(struct ceph_client *client) if (!client->debugfs_caps) goto out; + sprintf(name, "../../bdi/%s", 
dev_name(client->sb->s_bdi->dev)); + client->debugfs_bdi = debugfs_create_symlink("bdi", client->debugfs_dir, + name); + return 0; out: @@ -416,6 +422,7 @@ out: void ceph_debugfs_client_cleanup(struct ceph_client *client) { + debugfs_remove(client->debugfs_bdi); debugfs_remove(client->debugfs_caps); debugfs_remove(client->debugfs_dentry_lru); debugfs_remove(client->debugfs_osdmap); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index de5e32414978..2304bd2844a4 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -143,6 +143,7 @@ struct ceph_client { struct dentry *debugfs_monmap; struct dentry *debugfs_mdsmap, *debugfs_osdmap; struct dentry *debugfs_dir, *debugfs_dentry_lru, *debugfs_caps; + struct dentry *debugfs_bdi; #endif }; -- cgit v1.2.3 From 169e16ce816ca417286daf1db25de424a9d65a0c Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Wed, 16 Dec 2009 14:22:17 -0800 Subject: ceph: remove unaccessible code Signed-off-by: Yehuda Sadeh --- fs/ceph/messenger.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index b12604ef1846..2e4e9773c46b 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -1531,10 +1531,6 @@ more_kvec: ret = write_partial_kvec(con); if (ret <= 0) goto done; - if (ret < 0) { - dout("try_write write_partial_kvec err %d\n", ret); - goto done; - } } /* msg pages? */ -- cgit v1.2.3 From dbd646a851713bec5bfff40ecf624b2e78518fe5 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Wed, 16 Dec 2009 14:51:06 -0800 Subject: ceph: writepage grabs and releases inode Fixes a deadlock that is triggered due to kswapd, while the page was locked and the iput couldn't tear down the address space. Signed-off-by: Yehuda Sadeh --- fs/ceph/addr.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index bf535815592d..d0cdceb0b90b 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -448,8 +448,13 @@ out: static int ceph_writepage(struct page *page, struct writeback_control *wbc) { - int err = writepage_nounlock(page, wbc); + int err; + struct inode *inode = page->mapping->host; + BUG_ON(!inode); + igrab(inode); + err = writepage_nounlock(page, wbc); unlock_page(page); + iput(inode); return err; } -- cgit v1.2.3 From 2baba25019ec564cd247af74013873d69a0b8190 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Fri, 18 Dec 2009 13:51:57 -0800 Subject: ceph: writeback congestion control Set bdi congestion bit when amount of write data in flight exceeds adjustable threshold. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/addr.c | 35 +++++++++++++++++++++++++++++++++-- fs/ceph/debugfs.c | 33 +++++++++++++++++++++++++++++++++ fs/ceph/super.c | 36 ++++++++++++++++++++++++++++++++++++ fs/ceph/super.h | 3 +++ 4 files changed, 105 insertions(+), 2 deletions(-) diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index d0cdceb0b90b..a6850a14038e 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -47,6 +47,12 @@ * accounting is preserved. */ +#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10)) +#define CONGESTION_OFF_THRESH(congestion_kb) \ + (CONGESTION_ON_THRESH(congestion_kb) - \ + (CONGESTION_ON_THRESH(congestion_kb) >> 2)) + + /* * Dirty a page. 
Optimistically adjust accounting, on the assumption @@ -377,6 +383,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) { struct inode *inode; struct ceph_inode_info *ci; + struct ceph_client *client; struct ceph_osd_client *osdc; loff_t page_off = page->index << PAGE_CACHE_SHIFT; int len = PAGE_CACHE_SIZE; @@ -384,6 +391,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) int err = 0; struct ceph_snap_context *snapc; u64 snap_size = 0; + long writeback_stat; dout("writepage %p idx %lu\n", page, page->index); @@ -393,7 +401,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) } inode = page->mapping->host; ci = ceph_inode(inode); - osdc = &ceph_inode_to_client(inode)->osdc; + client = ceph_inode_to_client(inode); + osdc = &client->osdc; /* verify this is a writeable snap context */ snapc = (void *)page->private; @@ -420,6 +429,11 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) dout("writepage %p page %p index %lu on %llu~%u\n", inode, page, page->index, page_off, len); + writeback_stat = atomic_long_inc_return(&client->writeback_count); + if (writeback_stat > + CONGESTION_ON_THRESH(client->mount_args->congestion_kb)) + set_bdi_congested(&client->backing_dev_info, BLK_RW_ASYNC); + set_page_writeback(page); err = ceph_osdc_writepages(osdc, ceph_vino(inode), &ci->i_layout, snapc, @@ -499,6 +513,8 @@ static void writepages_finish(struct ceph_osd_request *req, struct writeback_control *wbc = req->r_wbc; __s32 rc = -EIO; u64 bytes = 0; + struct ceph_client *client = ceph_inode_to_client(inode); + long writeback_stat; /* parse reply */ replyhead = msg->front.iov_base; @@ -524,6 +540,13 @@ static void writepages_finish(struct ceph_osd_request *req, BUG_ON(!page); WARN_ON(!PageUptodate(page)); + writeback_stat = + atomic_long_dec_return(&client->writeback_count); + if (writeback_stat < + CONGESTION_OFF_THRESH(client->mount_args->congestion_kb)) + clear_bdi_congested(&client->backing_dev_info, + BLK_RW_ASYNC); + if (i >= wrote) { dout("inode %p skipping page %p\n", inode, page); wbc->pages_skipped++; @@ -666,6 +689,7 @@ retry: u64 offset, len; struct ceph_osd_request_head *reqhead; struct ceph_osd_op *op; + long writeback_stat; next = 0; locked_pages = 0; @@ -773,6 +797,12 @@ get_more_pages: first = i; dout("%p will write page %p idx %lu\n", inode, page, page->index); + + writeback_stat = atomic_long_inc_return(&client->writeback_count); + if (writeback_stat > CONGESTION_ON_THRESH(client->mount_args->congestion_kb)) { + set_bdi_congested(&client->backing_dev_info, BLK_RW_ASYNC); + } + set_page_writeback(page); req->r_pages[locked_pages] = page; locked_pages++; @@ -998,7 +1028,8 @@ static int ceph_write_end(struct file *file, struct address_space *mapping, struct page *page, void *fsdata) { struct inode *inode = file->f_dentry->d_inode; - struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; + struct ceph_client *client = ceph_inode_to_client(inode); + struct ceph_mds_client *mdsc = &client->mdsc; unsigned from = pos & (PAGE_CACHE_SIZE - 1); int check_cap = 0; diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index 441484ab7e94..22d3b47fb1be 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -320,6 +320,30 @@ DEFINE_SHOW_FUNC(osdc_show) DEFINE_SHOW_FUNC(dentry_lru_show) DEFINE_SHOW_FUNC(caps_show) +static int congestion_kb_set(void *data, u64 val) +{ + struct ceph_client *client = (struct ceph_client *)data; + + if (client) + 
client->mount_args->congestion_kb = (int)val; + + return 0; +} + +static int congestion_kb_get(void *data, u64 *val) +{ + struct ceph_client *client = (struct ceph_client *)data; + + if (client) + *val = (u64)client->mount_args->congestion_kb; + + return 0; +} + + +DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get, + congestion_kb_set, "%llu\n"); + int __init ceph_debugfs_init(void) { ceph_debugfs_dir = debugfs_create_dir("ceph", NULL); @@ -409,6 +433,14 @@ int ceph_debugfs_client_init(struct ceph_client *client) if (!client->debugfs_caps) goto out; + client->debugfs_congestion_kb = debugfs_create_file("writeback_congestion_kb", + 0600, + client->debugfs_dir, + client, + &congestion_kb_fops); + if (!client->debugfs_congestion_kb) + goto out; + sprintf(name, "../../bdi/%s", dev_name(client->sb->s_bdi->dev)); client->debugfs_bdi = debugfs_create_symlink("bdi", client->debugfs_dir, name); @@ -431,6 +463,7 @@ void ceph_debugfs_client_cleanup(struct ceph_client *client) debugfs_remove(client->osdc.debugfs_file); debugfs_remove(client->mdsc.debugfs_file); debugfs_remove(client->monc.debugfs_file); + debugfs_remove(client->debugfs_congestion_kb); debugfs_remove(client->debugfs_dir); } diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 6d02a166f8ff..b9cb8cebcdc1 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -150,6 +150,35 @@ static void ceph_inode_init_once(void *foo) inode_init_once(&ci->vfs_inode); } +static int default_congestion_kb(void) +{ + int congestion_kb; + + /* + * Copied from NFS + * + * congestion size, scale with available memory. + * + * 64MB: 8192k + * 128MB: 11585k + * 256MB: 16384k + * 512MB: 23170k + * 1GB: 32768k + * 2GB: 46340k + * 4GB: 65536k + * 8GB: 92681k + * 16GB: 131072k + * + * This allows larger machines to have larger/more transfers. 
+ * Limit the default to 256M + */ + congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10); + if (congestion_kb > 256*1024) + congestion_kb = 256*1024; + + return congestion_kb; +} + static int __init init_caches(void) { ceph_inode_cachep = kmem_cache_create("ceph_inode_info", @@ -267,6 +296,7 @@ enum { Opt_caps_wanted_delay_min, Opt_caps_wanted_delay_max, Opt_readdir_max_entries, + Opt_congestion_kb, Opt_last_int, /* int args above */ Opt_snapdirname, @@ -295,6 +325,7 @@ static match_table_t arg_tokens = { {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"}, {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"}, {Opt_readdir_max_entries, "readdir_max_entries=%d"}, + {Opt_congestion_kb, "write_congestion_kb=%d"}, /* int args above */ {Opt_snapdirname, "snapdirname=%s"}, {Opt_name, "name=%s"}, @@ -342,6 +373,7 @@ static struct ceph_mount_args *parse_mount_args(int flags, char *options, args->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL); args->cap_release_safety = CEPH_CAPS_PER_RELEASE * 4; args->max_readdir = 1024; + args->congestion_kb = default_congestion_kb(); /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */ err = -EINVAL; @@ -445,6 +477,9 @@ static struct ceph_mount_args *parse_mount_args(int flags, char *options, case Opt_readdir_max_entries: args->max_readdir = intval; break; + case Opt_congestion_kb: + args->congestion_kb = intval; + break; case Opt_noshare: args->flags |= CEPH_OPT_NOSHARE; @@ -516,6 +551,7 @@ static struct ceph_client *ceph_create_client(struct ceph_mount_args *args) client->msgr = NULL; client->mount_err = 0; + atomic_long_set(&client->writeback_count, 0); err = bdi_init(&client->backing_dev_info); if (err < 0) diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 2304bd2844a4..62d9ae482d72 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -59,6 +59,7 @@ struct ceph_mount_args { int wsize; int rsize; /* max readahead */ int max_readdir; /* max readdir size */ + int congestion_kb; /* max readdir size */ int osd_timeout; char *snapdir_name; /* default ".snap" */ char *name; @@ -136,6 +137,7 @@ struct ceph_client { struct workqueue_struct *wb_wq; struct workqueue_struct *pg_inv_wq; struct workqueue_struct *trunc_wq; + atomic_long_t writeback_count; struct backing_dev_info backing_dev_info; @@ -143,6 +145,7 @@ struct ceph_client { struct dentry *debugfs_monmap; struct dentry *debugfs_mdsmap, *debugfs_osdmap; struct dentry *debugfs_dir, *debugfs_dentry_lru, *debugfs_caps; + struct dentry *debugfs_congestion_kb; struct dentry *debugfs_bdi; #endif }; -- cgit v1.2.3 From c4a29f26d50bea65809ca670992108a33aa2efa6 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 21 Dec 2009 11:42:18 -0800 Subject: ceph: ensure rename target dentry fails revalidation This works around a bug in vfs_rename_dir() that rehashes the target dentry. Ensure such dentries always fail revalidation by timing out the dentry lease and kicking it out of the current directory lease gen. This can be reverted when the vfs bug is fixed. 
Signed-off-by: Sage Weil --- fs/ceph/inode.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index db684686f48a..8774b2811597 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -939,6 +939,10 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, req->r_old_dentry->d_name.len, req->r_old_dentry->d_name.name, dn, dn->d_name.len, dn->d_name.name); + /* ensure target dentry is invalidated, despite + rehashing bug in vfs_rename_dir */ + dn->d_time = jiffies; + ceph_dentry(dn)->lease_shared_gen = 0; /* take overwritten dentry's readdir offset */ ceph_dentry(req->r_old_dentry)->offset = ceph_dentry(dn)->offset; -- cgit v1.2.3 From 5de7bf8afa87f75af5ef3d6f9fce3e171cac834c Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 21 Dec 2009 11:48:44 -0800 Subject: ceph: do not drop lease during revalidate We need to hold session s_mutex for __ceph_mdsc_drop_dentry_lease(), which we don't, so skip it. It was purely an optimization. Signed-off-by: Sage Weil --- fs/ceph/dir.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 89ce3ba4a614..fde839c61236 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -920,8 +920,6 @@ static int dentry_lease_is_valid(struct dentry *dentry) di->lease_renew_after = 0; di->lease_renew_from = jiffies; } - } else { - __ceph_mdsc_drop_dentry_lease(dentry); } } spin_unlock(&dentry->d_lock); -- cgit v1.2.3 From 30dc6381bbac213987be6fe0b0fb89868ff1f2c0 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 21 Dec 2009 14:49:37 -0800 Subject: ceph: fix error paths for corrupt osdmap messages Both osdmap_decode() and osdmap_apply_incremental() should never return NULL. Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 2 ++ fs/ceph/osdmap.c | 11 ++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 4bfe880d53c8..b474b3ad61f0 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -910,6 +910,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) err = PTR_ERR(newmap); goto bad; } + BUG_ON(!newmap); if (newmap != osdc->osdmap) { ceph_osdmap_destroy(osdc->osdmap); osdc->osdmap = newmap; @@ -946,6 +947,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) err = PTR_ERR(newmap); goto bad; } + BUG_ON(!newmap); oldmap = osdc->osdmap; osdc->osdmap = newmap; if (oldmap) diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 8c8ffe5ef7d4..a9416308de6f 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -200,6 +200,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end) size = sizeof(struct crush_bucket_straw); break; default: + err = -EINVAL; goto bad; } BUG_ON(size == 0); @@ -278,6 +279,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end) /* len */ ceph_decode_32_safe(p, end, yes, bad); #if BITS_PER_LONG == 32 + err = -EINVAL; if (yes > ULONG_MAX / sizeof(struct crush_rule_step)) goto bad; #endif @@ -489,11 +491,10 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) ceph_decode_copy(p, &pgid, sizeof(pgid)); n = ceph_decode_32(p); ceph_decode_need(p, end, n * sizeof(u32), bad); + err = -ENOMEM; pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS); - if (!pg) { - err = -ENOMEM; + if (!pg) goto bad; - } pg->pgid = pgid; pg->len = n; for (j = 0; j < n; j++) @@ -564,8 +565,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, if (len > 0) { dout("apply_incremental full map len %d, %p to %p\n", len, *p, end); - 
newmap = osdmap_decode(p, min(*p+len, end)); - return newmap; /* error or not */ + return osdmap_decode(p, min(*p+len, end)); } /* new crush? */ @@ -809,6 +809,7 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol, struct ceph_pg_pool_info *pool; unsigned ps; + BUG_ON(!osdmap); if (poolid >= osdmap->num_pools) return -EIO; -- cgit v1.2.3 From 7067f797b8409f1e10ec95ac2c1e17a200173d13 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 21 Dec 2009 16:02:37 -0800 Subject: ceph: fix incremental osdmap pg_temp decoding bug An incremental pg_temp wasn't being decoded properly (wrong bound on for loop). Also remove unused local variable, while we're at it. Signed-off-by: Sage Weil --- fs/ceph/osdmap.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index a9416308de6f..0dbd606e21c4 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -538,7 +538,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, struct ceph_osdmap *map, struct ceph_messenger *msgr) { - struct ceph_osdmap *newmap = map; struct crush_map *newcrush = NULL; struct ceph_fsid fsid; u32 epoch = 0; @@ -701,7 +700,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, } pg->pgid = pgid; pg->len = pglen; - for (j = 0; j < len; j++) + for (j = 0; j < pglen; j++) pg->osds[j] = ceph_decode_32(p); err = __insert_pg_mapping(pg, &map->pg_temp); if (err) -- cgit v1.2.3 From 5dacf09121ffb2e5fc7d15b78cae0b77042a1935 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 21 Dec 2009 20:40:34 -0800 Subject: ceph: do not touch_caps while iterating over caps list Avoid confusing iterate_session_caps(), flag the session while we are iterating so that __touch_cap does not rearrange items on the list. All other modifiers of session->s_caps do so under the protection of s_mutex. 
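The hazard being avoided: iterate_session_caps() walks s_caps with a pre-fetched next pointer (list_for_each_entry_safe) and drops s_cap_lock around each callback, so a callback that reaches __touch_cap() could move entries, including the saved next element, to the tail mid-walk. A condensed userspace analogue of the guard, hypothetical and for illustration only (the real code also pins each inode with igrab/iput around the callback):

    #include <pthread.h>
    #include <stdbool.h>
    #include <sys/queue.h>

    struct cap {
            int id;
            TAILQ_ENTRY(cap) link;
    };

    struct session {
            pthread_mutex_t lock;       /* plays the role of s_cap_lock */
            bool iterating;             /* mirrors s_iterating_caps */
            TAILQ_HEAD(caplist, cap) caps;
    };

    /* LRU-style "touch": move to tail, but not while a walk is in flight */
    static void touch_cap(struct session *s, struct cap *c)
    {
            pthread_mutex_lock(&s->lock);
            if (!s->iterating) {
                    TAILQ_REMOVE(&s->caps, c, link);
                    TAILQ_INSERT_TAIL(&s->caps, c, link);
            }
            pthread_mutex_unlock(&s->lock);
    }

    static void iterate_caps(struct session *s, void (*cb)(struct cap *))
    {
            struct cap *c, *next;

            pthread_mutex_lock(&s->lock);
            s->iterating = true;
            for (c = TAILQ_FIRST(&s->caps); c; c = next) {
                    next = TAILQ_NEXT(c, link);       /* stays valid: no moves */
                    pthread_mutex_unlock(&s->lock);
                    cb(c);                            /* may call touch_cap() */
                    pthread_mutex_lock(&s->lock);
            }
            s->iterating = false;
            pthread_mutex_unlock(&s->lock);
    }
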
Signed-off-by: Sage Weil --- fs/ceph/caps.c | 11 ++++++++--- fs/ceph/mds_client.c | 14 ++++++++++---- fs/ceph/mds_client.h | 1 + 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index dfb509f53542..93c1afe3f0b3 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -697,10 +697,15 @@ static void __touch_cap(struct ceph_cap *cap) { struct ceph_mds_session *s = cap->session; - dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap, - s->s_mds); spin_lock(&s->s_cap_lock); - list_move_tail(&cap->session_caps, &s->s_caps); + if (!s->s_iterating_caps) { + dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap, + s->s_mds); + list_move_tail(&cap->session_caps, &s->s_caps); + } else { + dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n", + &cap->ci->vfs_inode, cap, s->s_mds); + } spin_unlock(&s->s_cap_lock); } diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index d7cecc3366da..63ca3b1ad45f 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -337,10 +337,12 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, s->s_renew_seq = 0; INIT_LIST_HEAD(&s->s_caps); s->s_nr_caps = 0; + s->s_trim_caps = 0; atomic_set(&s->s_ref, 1); INIT_LIST_HEAD(&s->s_waiting); INIT_LIST_HEAD(&s->s_unsafe); s->s_num_cap_releases = 0; + s->s_iterating_caps = false; INIT_LIST_HEAD(&s->s_cap_releases); INIT_LIST_HEAD(&s->s_cap_releases_done); INIT_LIST_HEAD(&s->s_cap_flushing); @@ -699,6 +701,7 @@ static int iterate_session_caps(struct ceph_mds_session *session, dout("iterate_session_caps %p mds%d\n", session, session->s_mds); spin_lock(&session->s_cap_lock); + session->s_iterating_caps = true; list_for_each_entry_safe(cap, ncap, &session->s_caps, session_caps) { inode = igrab(&cap->ci->vfs_inode); if (!inode) @@ -706,13 +709,15 @@ static int iterate_session_caps(struct ceph_mds_session *session, spin_unlock(&session->s_cap_lock); ret = cb(inode, cap, arg); iput(inode); - if (ret < 0) - return ret; spin_lock(&session->s_cap_lock); + if (ret < 0) + goto out; } + ret = 0; +out: + session->s_iterating_caps = false; spin_unlock(&session->s_cap_lock); - - return 0; + return ret; } static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, @@ -935,6 +940,7 @@ static int trim_caps(struct ceph_mds_client *mdsc, dout("trim_caps mds%d done: %d / %d, trimmed %d\n", session->s_mds, session->s_nr_caps, max_caps, trim_caps - session->s_trim_caps); + session->s_trim_caps = 0; } return 0; } diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 41af5ca316e6..b1c2025227c5 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -114,6 +114,7 @@ struct ceph_mds_session { int s_num_cap_releases; struct list_head s_cap_releases; /* waiting cap_release messages */ struct list_head s_cap_releases_done; /* ready to send */ + bool s_iterating_caps; /* protected by mutex */ struct list_head s_cap_flushing; /* inodes w/ flushing caps */ -- cgit v1.2.3 From e0e3271074e1ebd0b80a912a457ce03c971bcd66 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 21 Dec 2009 21:04:26 -0800 Subject: ceph: only unregister registered bdi Signed-off-by: Sage Weil --- fs/ceph/super.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/ceph/super.c b/fs/ceph/super.c index b9cb8cebcdc1..cd81c84e96fc 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -942,7 +942,8 @@ static void ceph_kill_sb(struct super_block *s) dout("kill_sb %p\n", s); ceph_mdsc_pre_umount(&client->mdsc); kill_anon_super(s); /* will 
call put_super after sb is r/o */ - bdi_unregister(&client->backing_dev_info); + if (s->s_bdi == &client->backing_dev_info) + bdi_unregister(&client->backing_dev_info); bdi_destroy(&client->backing_dev_info); ceph_destroy_client(client); } -- cgit v1.2.3 From 529cfcc46ffa2cbe4d07641c11e65f67fe7b66e4 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 22 Dec 2009 10:29:39 -0800 Subject: ceph: unregister canceled/timed out osd requests Canceled or timed out osd requests were getting left in the request list and never deallocated (until umount). Unregister if they are canceled (control-c) or time out. Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index b474b3ad61f0..a1800fb63237 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -1071,8 +1071,9 @@ int ceph_osdc_wait_request(struct ceph_osd_client *osdc, if (rc < 0) { mutex_lock(&osdc->request_mutex); __cancel_request(req); + __unregister_request(osdc, req); mutex_unlock(&osdc->request_mutex); - dout("wait_request tid %llu timed out\n", req->r_tid); + dout("wait_request tid %llu canceled/timed out\n", req->r_tid); return rc; } -- cgit v1.2.3 From ec302645f4a9bd9ec757c30d185557e1c0972c1a Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 22 Dec 2009 10:43:42 -0800 Subject: ceph: use connection mutex to protect read and write stages Use a single mutex (previously out_mutex) to protect both read and write activity from concurrent ceph_con_* calls. Drop the mutex when doing callbacks to avoid nested locking (the callback may need to call something like ceph_con_close). Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 49 +++++++++++++++++++++++++++++++------------------ fs/ceph/messenger.h | 3 ++- 2 files changed, 33 insertions(+), 19 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 2e4e9773c46b..c03b4185c143 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -316,7 +316,6 @@ static void reset_connection(struct ceph_connection *con) { /* reset connection, out_queue, msg_ and connect_seq */ /* discard existing out_queue and msg_seq */ - mutex_lock(&con->out_mutex); ceph_msg_remove_list(&con->out_queue); ceph_msg_remove_list(&con->out_sent); @@ -332,7 +331,6 @@ static void reset_connection(struct ceph_connection *con) con->out_msg = NULL; } con->in_seq = 0; - mutex_unlock(&con->out_mutex); } /* @@ -343,7 +341,9 @@ void ceph_con_close(struct ceph_connection *con) dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr)); set_bit(CLOSED, &con->state); /* in case there's queued work */ clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */ + mutex_lock(&con->mutex); reset_connection(con); + mutex_unlock(&con->mutex); queue_con(con); } @@ -392,7 +392,7 @@ void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con) memset(con, 0, sizeof(*con)); atomic_set(&con->nref, 1); con->msgr = msgr; - mutex_init(&con->out_mutex); + mutex_init(&con->mutex); INIT_LIST_HEAD(&con->out_queue); INIT_LIST_HEAD(&con->out_sent); INIT_DELAYED_WORK(&con->work, con_work); @@ -571,11 +571,13 @@ static void prepare_connect_authorizer(struct ceph_connection *con) int auth_len = 0; int auth_protocol = 0; + mutex_unlock(&con->mutex); if (con->ops->get_authorizer) con->ops->get_authorizer(con, &auth_buf, &auth_len, &auth_protocol, &con->auth_reply_buf, &con->auth_reply_buf_len, con->auth_retry); + mutex_lock(&con->mutex); con->out_connect.authorizer_protocol = 
cpu_to_le32(auth_protocol); con->out_connect.authorizer_len = cpu_to_le32(auth_len); @@ -1094,10 +1096,13 @@ static int process_connect(struct ceph_connection *con) le32_to_cpu(con->out_connect.protocol_version), le32_to_cpu(con->in_reply.protocol_version)); con->error_msg = "protocol version mismatch"; - if (con->ops->bad_proto) - con->ops->bad_proto(con); reset_connection(con); set_bit(CLOSED, &con->state); /* in case there's queued work */ + + mutex_unlock(&con->mutex); + if (con->ops->bad_proto) + con->ops->bad_proto(con); + mutex_lock(&con->mutex); return -1; case CEPH_MSGR_TAG_BADAUTHORIZER: @@ -1133,9 +1138,11 @@ static int process_connect(struct ceph_connection *con) prepare_read_connect(con); /* Tell ceph about it. */ + mutex_unlock(&con->mutex); pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name)); if (con->ops->peer_reset) con->ops->peer_reset(con); + mutex_lock(&con->mutex); break; case CEPH_MSGR_TAG_RETRY_SESSION: @@ -1221,7 +1228,6 @@ static void process_ack(struct ceph_connection *con) u64 ack = le64_to_cpu(con->in_temp_ack); u64 seq; - mutex_lock(&con->out_mutex); while (!list_empty(&con->out_sent)) { m = list_first_entry(&con->out_sent, struct ceph_msg, list_head); @@ -1232,7 +1238,6 @@ static void process_ack(struct ceph_connection *con) le16_to_cpu(m->hdr.type), m); ceph_msg_remove(m); } - mutex_unlock(&con->out_mutex); prepare_read_tag(con); } @@ -1366,8 +1371,10 @@ static int read_partial_message(struct ceph_connection *con) /* find pages for data payload */ want = calc_pages_for(data_off & ~PAGE_MASK, data_len); ret = -1; + mutex_unlock(&con->mutex); if (con->ops->prepare_pages) ret = con->ops->prepare_pages(con, m, want); + mutex_lock(&con->mutex); if (ret < 0) { dout("%p prepare_pages failed, skipping payload\n", m); con->in_base_pos = -data_len - sizeof(m->footer); @@ -1454,9 +1461,8 @@ static void process_message(struct ceph_connection *con) if (con->peer_name.type == 0) con->peer_name = msg->hdr.src.name; - mutex_lock(&con->out_mutex); con->in_seq++; - mutex_unlock(&con->out_mutex); + mutex_unlock(&con->mutex); dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n", msg, le64_to_cpu(msg->hdr.seq), @@ -1467,6 +1473,8 @@ static void process_message(struct ceph_connection *con) le32_to_cpu(msg->hdr.data_len), con->in_front_crc, con->in_middle_crc, con->in_data_crc); con->ops->dispatch(con, msg); + + mutex_lock(&con->mutex); prepare_read_tag(con); } @@ -1483,7 +1491,7 @@ static int try_write(struct ceph_connection *con) dout("try_write start %p state %lu nref %d\n", con, con->state, atomic_read(&con->nref)); - mutex_lock(&con->out_mutex); + mutex_lock(&con->mutex); more: dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); @@ -1576,7 +1584,7 @@ do_next: done: ret = 0; out: - mutex_unlock(&con->out_mutex); + mutex_unlock(&con->mutex); dout("try_write done on %p\n", con); return ret; } @@ -1600,6 +1608,8 @@ static int try_read(struct ceph_connection *con) dout("try_read start on %p\n", con); msgr = con->msgr; + mutex_lock(&con->mutex); + more: dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag, con->in_base_pos); @@ -1693,6 +1703,7 @@ more: done: ret = 0; out: + mutex_unlock(&con->mutex); dout("try_read done on %p\n", con); return ret; @@ -1818,6 +1829,8 @@ static void ceph_fault(struct ceph_connection *con) clear_bit(BUSY, &con->state); /* to avoid an improbable race */ + mutex_lock(&con->mutex); + con_close_socket(con); if (con->in_msg) { @@ -1827,24 +1840,24 @@ static void ceph_fault(struct ceph_connection *con) /* If there are no 
messages in the queue, place the connection * in a STANDBY state (i.e., don't try to reconnect just yet). */ - mutex_lock(&con->out_mutex); if (list_empty(&con->out_queue) && !con->out_keepalive_pending) { dout("fault setting STANDBY\n"); set_bit(STANDBY, &con->state); - mutex_unlock(&con->out_mutex); + mutex_unlock(&con->mutex); goto out; } /* Requeue anything that hasn't been acked, and retry after a * delay. */ list_splice_init(&con->out_sent, &con->out_queue); - mutex_unlock(&con->out_mutex); if (con->delay == 0) con->delay = BASE_DELAY_INTERVAL; else if (con->delay < MAX_DELAY_INTERVAL) con->delay *= 2; + mutex_unlock(&con->mutex); + /* explicitly schedule work to try to reconnect again later. */ dout("fault queueing %p delay %lu\n", con, con->delay); con->ops->get(con); @@ -1920,7 +1933,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) msg->hdr.dst_erank = con->peer_addr.erank; /* queue */ - mutex_lock(&con->out_mutex); + mutex_lock(&con->mutex); BUG_ON(!list_empty(&msg->list_head)); list_add_tail(&msg->list_head, &con->out_queue); dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg, @@ -1929,7 +1942,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) le32_to_cpu(msg->hdr.front_len), le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len)); - mutex_unlock(&con->out_mutex); + mutex_unlock(&con->mutex); /* if there wasn't anything waiting to send before, queue * new work */ @@ -1942,7 +1955,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) */ void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg) { - mutex_lock(&con->out_mutex); + mutex_lock(&con->mutex); if (!list_empty(&msg->list_head)) { dout("con_revoke %p msg %p\n", con, msg); list_del_init(&msg->list_head); @@ -1959,7 +1972,7 @@ void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg) } else { dout("con_revoke %p msg %p - not queued (sent?)\n", con, msg); } - mutex_unlock(&con->out_mutex); + mutex_unlock(&con->mutex); } /* diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index e04c214b4f6f..94b55de90331 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -155,8 +155,9 @@ struct ceph_connection { void *auth_reply_buf; /* where to put the authorizer reply */ int auth_reply_buf_len; + struct mutex mutex; + /* out queue */ - struct mutex out_mutex; struct list_head out_queue; struct list_head out_sent; /* sending or sent but unacked */ u64 out_seq; /* last message queued for send */ -- cgit v1.2.3 From 350b1c32ea58d29e25d63fc25e92dd48f9339546 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 22 Dec 2009 10:45:45 -0800 Subject: ceph: control access to page vector for incoming data When we issue an OSD read, we specify a vector of pages that the data is to be read into. The request may be sent multiple times, to multiple OSDs, if the osdmap changes, which means we can get more than one reply. Only read data into the page vector if the reply is coming from the OSD we last sent the request to. Keep track of which connection is using the vector by taking a reference. If another connection was already using the vector before and a new reply comes in on the right connection, revoke the pages from the other connection. 
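In short, each request remembers which connection is currently filling its page vector (r_con_filling_pages) and holds a reference on it; before handing the vector to a new connection, any previous holder is revoked. A simplified sketch of the ownership hand-off (names as introduced in the patch below):

    /* prepare_pages(), called under osdc->request_mutex */
    if (req->r_con_filling_pages) {
            /* an earlier send left another connection reading into
             * these pages; revoke them and drop that reference */
            ceph_con_revoke_pages(req->r_con_filling_pages, req->r_pages);
            ceph_con_put(req->r_con_filling_pages);
    }
    req->r_con_filling_pages = ceph_con_get(con);   /* new owner */
    m->pages = req->r_pages;
    m->nr_pages = req->r_num_pages;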
Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 29 +++++++++++++++++++++++++++++ fs/ceph/messenger.h | 2 ++ fs/ceph/osd_client.c | 42 +++++++++++++++++++++++++++++++++--------- fs/ceph/osd_client.h | 4 +++- 4 files changed, 67 insertions(+), 10 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index c03b4185c143..506b638a023b 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -1975,6 +1975,35 @@ void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg) mutex_unlock(&con->mutex); } +/* + * Revoke a page vector that we may be reading data into + */ +void ceph_con_revoke_pages(struct ceph_connection *con, struct page **pages) +{ + mutex_lock(&con->mutex); + if (con->in_msg && con->in_msg->pages == pages) { + unsigned data_len = le32_to_cpu(con->in_hdr.data_len); + + /* skip rest of message */ + dout("con_revoke_pages %p msg %p pages %p revoked\n", con, + con->in_msg, pages); + if (con->in_msg_pos.data_pos < data_len) + con->in_base_pos = con->in_msg_pos.data_pos - data_len; + else + con->in_base_pos = con->in_base_pos - + sizeof(struct ceph_msg_header) - + sizeof(struct ceph_msg_footer); + con->in_msg->pages = NULL; + ceph_msg_put(con->in_msg); + con->in_msg = NULL; + con->in_tag = CEPH_MSGR_TAG_READY; + } else { + dout("con_revoke_pages %p msg %p pages %p no-op\n", + con, con->in_msg, pages); + } + mutex_unlock(&con->mutex); +} + /* * Queue a keepalive byte to ensure the tcp connection is alive. */ diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index 94b55de90331..7e2aab1d3ce2 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -230,6 +230,8 @@ extern void ceph_con_open(struct ceph_connection *con, extern void ceph_con_close(struct ceph_connection *con); extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg); extern void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg); +extern void ceph_con_revoke_pages(struct ceph_connection *con, + struct page **pages); extern void ceph_con_keepalive(struct ceph_connection *con); extern struct ceph_connection *ceph_con_get(struct ceph_connection *con); extern void ceph_con_put(struct ceph_connection *con); diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index a1800fb63237..374f0013956c 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -87,6 +87,13 @@ void ceph_osdc_release_request(struct kref *kref) ceph_msg_put(req->r_request); if (req->r_reply) ceph_msg_put(req->r_reply); + if (req->r_con_filling_pages) { + dout("release_request revoking pages %p from con %p\n", + req->r_pages, req->r_con_filling_pages); + ceph_con_revoke_pages(req->r_con_filling_pages, + req->r_pages); + ceph_con_put(req->r_con_filling_pages); + } if (req->r_own_pages) ceph_release_page_vector(req->r_pages, req->r_num_pages); @@ -687,7 +694,8 @@ static void handle_timeout(struct work_struct *work) * handle osd op reply. either call the callback if it is specified, * or do the completion to wake up the waiting thread. 
*/ -static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg) +static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, + struct ceph_connection *con) { struct ceph_osd_reply_head *rhead = msg->front.iov_base; struct ceph_osd_request *req; @@ -715,6 +723,16 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg) ceph_osdc_get_request(req); flags = le32_to_cpu(rhead->flags); + /* + * if this connection filled our pages, drop our reference now, to + * avoid a (safe but slower) revoke later. + */ + if (req->r_con_filling_pages == con && req->r_pages == msg->pages) { + dout(" got pages, dropping con_filling_pages ref %p\n", con); + req->r_con_filling_pages = NULL; + ceph_con_put(con); + } + if (req->r_reply) { /* * once we see the message has been received, we don't @@ -1007,14 +1025,20 @@ static int prepare_pages(struct ceph_connection *con, struct ceph_msg *m, } dout("prepare_pages tid %llu has %d pages, want %d\n", tid, req->r_num_pages, want); - if (likely(req->r_num_pages >= want && !req->r_prepared_pages)) { - m->pages = req->r_pages; - m->nr_pages = req->r_num_pages; - req->r_reply = m; /* only for duration of read over socket */ - ceph_msg_get(m); - req->r_prepared_pages = 1; - ret = 0; /* success */ + if (unlikely(req->r_num_pages < want)) + goto out; + + if (req->r_con_filling_pages) { + dout("revoking pages %p from old con %p\n", req->r_pages, + req->r_con_filling_pages); + ceph_con_revoke_pages(req->r_con_filling_pages, req->r_pages); + ceph_con_put(req->r_con_filling_pages); } + req->r_con_filling_pages = ceph_con_get(con); + req->r_reply = ceph_msg_get(m); /* for duration of read over socket */ + m->pages = req->r_pages; + m->nr_pages = req->r_num_pages; + ret = 0; /* success */ out: mutex_unlock(&osdc->request_mutex); return ret; @@ -1269,7 +1293,7 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) ceph_osdc_handle_map(osdc, msg); break; case CEPH_MSG_OSD_OPREPLY: - handle_reply(osdc, msg); + handle_reply(osdc, msg, con); break; default: diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h index 2e4cfd1e9f10..8fef71cc4457 100644 --- a/fs/ceph/osd_client.h +++ b/fs/ceph/osd_client.h @@ -43,11 +43,13 @@ struct ceph_osd_request { struct list_head r_osd_item; struct ceph_osd *r_osd; + struct ceph_connection *r_con_filling_pages; + struct ceph_msg *r_request, *r_reply; int r_result; int r_flags; /* any additional flags for the osd */ u32 r_sent; /* >0 if r_request is sending/sent */ - int r_prepared_pages, r_got_reply; + int r_got_reply; int r_num_prealloc_reply; struct ceph_osd_client *r_osdc; -- cgit v1.2.3 From 0cf90ab5b075821940873e73cdbfeb8edc3dabe8 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 22 Dec 2009 10:45:18 -0800 Subject: ceph: more informative msgpool errors Signed-off-by: Sage Weil --- fs/ceph/msgpool.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/ceph/msgpool.c b/fs/ceph/msgpool.c index ad5482c0267b..2f04e0fc4666 100644 --- a/fs/ceph/msgpool.c +++ b/fs/ceph/msgpool.c @@ -140,7 +140,7 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len) return msg; } pr_err("msgpool_get %p now %d/%d, %s\n", pool, pool->num, - pool->min, pool->blocking ? "waiting" : "failing"); + pool->min, pool->blocking ? 
"waiting" : "may fail"); spin_unlock(&pool->lock); if (!pool->blocking) { @@ -151,6 +151,7 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len) if (!IS_ERR(msg)) return msg; + pr_err("msgpool_get %p empty + alloc failed\n", pool); return ERR_PTR(-ENOMEM); } -- cgit v1.2.3 From 6df058c025ce343052c5516b1d8a9a7e73cddd64 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 22 Dec 2009 11:24:33 -0800 Subject: ceph: include transaction id in ceph_msg_header (protocol change) Many (most?) message types include a transaction id. By including it in the fixed size header, we always have it available even when we are unable to allocate memory for the (larger, variable sized) message body. This will allow us to error out the appropriate request instead of (silently) dropping the reply. Signed-off-by: Sage Weil --- fs/ceph/caps.c | 16 ++++++++-------- fs/ceph/ceph_fs.h | 8 ++------ fs/ceph/mds_client.c | 5 +++-- fs/ceph/mon_client.c | 4 ++-- fs/ceph/msgr.h | 3 ++- fs/ceph/osd_client.c | 9 +++------ fs/ceph/rados.h | 2 -- 7 files changed, 20 insertions(+), 27 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 93c1afe3f0b3..847ae64346fe 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -922,14 +922,14 @@ static int send_cap_msg(struct ceph_mds_session *session, if (IS_ERR(msg)) return PTR_ERR(msg); - fc = msg->front.iov_base; + msg->hdr.tid = cpu_to_le64(flush_tid); + fc = msg->front.iov_base; memset(fc, 0, sizeof(*fc)); fc->cap_id = cpu_to_le64(cid); fc->op = cpu_to_le32(op); fc->seq = cpu_to_le32(seq); - fc->client_tid = cpu_to_le64(flush_tid); fc->issue_seq = cpu_to_le32(issue_seq); fc->migrate_seq = cpu_to_le32(mseq); fc->caps = cpu_to_le32(caps); @@ -2329,7 +2329,7 @@ restart: * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the * MDS has been safely committed. */ -static void handle_cap_flush_ack(struct inode *inode, +static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, struct ceph_mds_caps *m, struct ceph_mds_session *session, struct ceph_cap *cap) @@ -2340,7 +2340,6 @@ static void handle_cap_flush_ack(struct inode *inode, unsigned seq = le32_to_cpu(m->seq); int dirty = le32_to_cpu(m->dirty); int cleaned = 0; - u64 flush_tid = le64_to_cpu(m->client_tid); int drop = 0; int i; @@ -2396,13 +2395,12 @@ out: * * Caller hold s_mutex. 
*/ -static void handle_cap_flushsnap_ack(struct inode *inode, +static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid, struct ceph_mds_caps *m, struct ceph_mds_session *session) { struct ceph_inode_info *ci = ceph_inode(inode); u64 follows = le64_to_cpu(m->snap_follows); - u64 flush_tid = le64_to_cpu(m->client_tid); struct ceph_cap_snap *capsnap; int drop = 0; @@ -2587,12 +2585,14 @@ void ceph_handle_caps(struct ceph_mds_session *session, struct ceph_vino vino; u64 cap_id; u64 size, max_size; + u64 tid; int check_caps = 0; int r; dout("handle_caps from mds%d\n", mds); /* decode */ + tid = le64_to_cpu(msg->hdr.tid); if (msg->front.iov_len < sizeof(*h)) goto bad; h = msg->front.iov_base; @@ -2621,7 +2621,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, /* these will work even if we don't have a cap yet */ switch (op) { case CEPH_CAP_OP_FLUSHSNAP_ACK: - handle_cap_flushsnap_ack(inode, h, session); + handle_cap_flushsnap_ack(inode, tid, h, session); goto done; case CEPH_CAP_OP_EXPORT: @@ -2662,7 +2662,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, break; case CEPH_CAP_OP_FLUSH_ACK: - handle_cap_flush_ack(inode, h, session, cap); + handle_cap_flush_ack(inode, tid, h, session, cap); break; case CEPH_CAP_OP_TRUNC: diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index e2fd0247827e..e87dfa6ec8e5 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -35,7 +35,7 @@ * internal cluster protocols separately from the public, * client-facing protocol. */ -#define CEPH_OSD_PROTOCOL 7 /* cluster internal */ +#define CEPH_OSD_PROTOCOL 8 /* cluster internal */ #define CEPH_MDS_PROTOCOL 9 /* cluster internal */ #define CEPH_MON_PROTOCOL 5 /* cluster internal */ #define CEPH_OSDC_PROTOCOL 22 /* server/client */ @@ -136,7 +136,6 @@ struct ceph_mon_request_header { struct ceph_mon_statfs { struct ceph_mon_request_header monhdr; struct ceph_fsid fsid; - __le64 tid; } __attribute__ ((packed)); struct ceph_statfs { @@ -146,7 +145,6 @@ struct ceph_statfs { struct ceph_mon_statfs_reply { struct ceph_fsid fsid; - __le64 tid; __le64 version; struct ceph_statfs st; } __attribute__ ((packed)); @@ -333,7 +331,7 @@ union ceph_mds_request_args { #define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */ struct ceph_mds_request_head { - __le64 tid, oldest_client_tid; + __le64 oldest_client_tid; __le32 mdsmap_epoch; /* on client */ __le32 flags; /* CEPH_MDS_FLAG_* */ __u8 num_retry, num_fwd; /* count retry, fwd attempts */ @@ -356,7 +354,6 @@ struct ceph_mds_request_release { /* client reply */ struct ceph_mds_reply_head { - __le64 tid; __le32 op; __le32 result; __le32 mdsmap_epoch; @@ -542,7 +539,6 @@ struct ceph_mds_caps { __le32 migrate_seq; __le64 snap_follows; __le32 snap_trace_len; - __le64 client_tid; /* for FLUSH(SNAP) -> FLUSH(SNAP)_ACK */ /* authlock */ __le32 uid, gid, mode; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 63ca3b1ad45f..ec884e2845db 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1339,6 +1339,8 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, if (IS_ERR(msg)) goto out_free2; + msg->hdr.tid = cpu_to_le64(req->r_tid); + head = msg->front.iov_base; p = msg->front.iov_base + sizeof(*head); end = msg->front.iov_base + msg->front.iov_len; @@ -1431,7 +1433,6 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc, req->r_request = msg; rhead = msg->front.iov_base; - rhead->tid = cpu_to_le64(req->r_tid); rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); if 
(req->r_got_unsafe) flags |= CEPH_MDS_FLAG_REPLAY; @@ -1664,7 +1665,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) } /* get request, session */ - tid = le64_to_cpu(head->tid); + tid = le64_to_cpu(msg->hdr.tid); mutex_lock(&mdsc->mutex); req = __lookup_request(mdsc, tid); if (!req) { diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index 775a9c029c51..bb94006fc686 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -349,7 +349,7 @@ static void handle_statfs_reply(struct ceph_mon_client *monc, if (msg->front.iov_len != sizeof(*reply)) goto bad; - tid = le64_to_cpu(reply->tid); + tid = le64_to_cpu(msg->hdr.tid); dout("handle_statfs_reply %p tid %llu\n", msg, tid); mutex_lock(&monc->mutex); @@ -382,12 +382,12 @@ static int send_statfs(struct ceph_mon_client *monc, if (IS_ERR(msg)) return PTR_ERR(msg); req->request = msg; + msg->hdr.tid = cpu_to_le64(req->tid); h = msg->front.iov_base; h->monhdr.have_version = 0; h->monhdr.session_mon = cpu_to_le16(-1); h->monhdr.session_mon_tid = 0; h->fsid = monc->monmap->fsid; - h->tid = cpu_to_le64(req->tid); ceph_con_send(monc->con, msg); return 0; } diff --git a/fs/ceph/msgr.h b/fs/ceph/msgr.h index c758e8f8f71b..e46d8b806dea 100644 --- a/fs/ceph/msgr.h +++ b/fs/ceph/msgr.h @@ -21,7 +21,7 @@ * whenever the wire protocol changes. try to keep this string length * constant. */ -#define CEPH_BANNER "ceph v024" +#define CEPH_BANNER "ceph v025" #define CEPH_BANNER_MAX_LEN 30 @@ -132,6 +132,7 @@ struct ceph_msg_connect_reply { */ struct ceph_msg_header { __le64 seq; /* message seq# for this session */ + __le64 tid; /* transaction id */ __le16 type; /* message type */ __le16 priority; /* priority. higher value == higher priority */ __le16 version; /* version of message encoding */ diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 374f0013956c..a0aac436d5d4 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -439,11 +439,9 @@ static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o) static void register_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req) { - struct ceph_osd_request_head *head = req->r_request->front.iov_base; - mutex_lock(&osdc->request_mutex); req->r_tid = ++osdc->last_tid; - head->tid = cpu_to_le64(req->r_tid); + req->r_request->hdr.tid = cpu_to_le64(req->r_tid); dout("register_request %p tid %lld\n", req, req->r_tid); __insert_request(osdc, req); @@ -702,9 +700,9 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, u64 tid; int numops, object_len, flags; + tid = le64_to_cpu(msg->hdr.tid); if (msg->front.iov_len < sizeof(*rhead)) goto bad; - tid = le64_to_cpu(rhead->tid); numops = le32_to_cpu(rhead->num_ops); object_len = le32_to_cpu(rhead->object_len); if (msg->front.iov_len != sizeof(*rhead) + object_len + @@ -1002,7 +1000,6 @@ static int prepare_pages(struct ceph_connection *con, struct ceph_msg *m, { struct ceph_osd *osd = con->private; struct ceph_osd_client *osdc; - struct ceph_osd_reply_head *rhead = m->front.iov_base; struct ceph_osd_request *req; u64 tid; int ret = -1; @@ -1016,7 +1013,7 @@ static int prepare_pages(struct ceph_connection *con, struct ceph_msg *m, if (unlikely(type != CEPH_MSG_OSD_OPREPLY)) return -1; /* hmm! 
*/ - tid = le64_to_cpu(rhead->tid); + tid = le64_to_cpu(m->hdr.tid); mutex_lock(&osdc->request_mutex); req = __lookup_request(osdc, tid); if (!req) { diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h index 12bfb2f7c275..c5614d4ae34a 100644 --- a/fs/ceph/rados.h +++ b/fs/ceph/rados.h @@ -331,7 +331,6 @@ struct ceph_osd_op { * ceph_osd_op object operations. */ struct ceph_osd_request_head { - __le64 tid; /* transaction id */ __le32 client_inc; /* client incarnation */ struct ceph_object_layout layout; /* pgid */ __le32 osdmap_epoch; /* client's osdmap epoch */ @@ -352,7 +351,6 @@ struct ceph_osd_request_head { } __attribute__ ((packed)); struct ceph_osd_reply_head { - __le64 tid; /* transaction id */ __le32 client_inc; /* client incarnation */ __le32 flags; struct ceph_object_layout layout; -- cgit v1.2.3 From 04a419f908b5291ff7e8ffd7aa351fa0ac0c08af Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 23 Dec 2009 09:30:21 -0800 Subject: ceph: add feature bits to connection handshake (protocol change) Define supported and required feature set. Fail connection if the server requires features we do not support (TAG_FEATURES), or if the server does not support features we require. Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.h | 6 ++++++ fs/ceph/dir.c | 12 ++++++------ fs/ceph/messenger.c | 47 +++++++++++++++++++++++++++++++++++++---------- fs/ceph/msgr.h | 5 ++++- 4 files changed, 53 insertions(+), 17 deletions(-) diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index e87dfa6ec8e5..db3fed33c4aa 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -50,6 +50,12 @@ #define CEPH_MAX_MON 31 +/* + * feature bits + */ +#define CEPH_FEATURE_SUPPORTED 0 +#define CEPH_FEATURE_REQUIRED 0 + /* * ceph_file_layout - describe data layout for a file/inode diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index fde839c61236..5107384ee029 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -1135,9 +1135,9 @@ void ceph_dentry_lru_add(struct dentry *dn) { struct ceph_dentry_info *di = ceph_dentry(dn); struct ceph_mds_client *mdsc; - dout("dentry_lru_add %p %p\t%.*s\n", - di, dn, dn->d_name.len, dn->d_name.name); + dout("dentry_lru_add %p %p '%.*s'\n", di, dn, + dn->d_name.len, dn->d_name.name); if (di) { mdsc = &ceph_client(dn->d_sb)->mdsc; spin_lock(&mdsc->dentry_lru_lock); @@ -1151,9 +1151,9 @@ void ceph_dentry_lru_touch(struct dentry *dn) { struct ceph_dentry_info *di = ceph_dentry(dn); struct ceph_mds_client *mdsc; - dout("dentry_lru_touch %p %p\t%.*s\n", - di, dn, dn->d_name.len, dn->d_name.name); + dout("dentry_lru_touch %p %p '%.*s'\n", di, dn, + dn->d_name.len, dn->d_name.name); if (di) { mdsc = &ceph_client(dn->d_sb)->mdsc; spin_lock(&mdsc->dentry_lru_lock); @@ -1167,8 +1167,8 @@ void ceph_dentry_lru_del(struct dentry *dn) struct ceph_dentry_info *di = ceph_dentry(dn); struct ceph_mds_client *mdsc; - dout("dentry_lru_del %p %p\t%.*s\n", - di, dn, dn->d_name.len, dn->d_name.name); + dout("dentry_lru_del %p %p '%.*s'\n", di, dn, + dn->d_name.len, dn->d_name.name); if (di) { mdsc = &ceph_client(dn->d_sb)->mdsc; spin_lock(&mdsc->dentry_lru_lock); diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 506b638a023b..68052f664280 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -631,6 +631,7 @@ static void prepare_write_connect(struct ceph_messenger *msgr, dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con, con->connect_seq, global_seq, proto); + con->out_connect.features = CEPH_FEATURE_SUPPORTED; con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT); 
con->out_connect.connect_seq = cpu_to_le32(con->connect_seq); con->out_connect.global_seq = cpu_to_le32(global_seq); @@ -1080,15 +1081,37 @@ static int process_banner(struct ceph_connection *con) return 0; } +static void fail_protocol(struct ceph_connection *con) +{ + reset_connection(con); + set_bit(CLOSED, &con->state); /* in case there's queued work */ + + mutex_unlock(&con->mutex); + if (con->ops->bad_proto) + con->ops->bad_proto(con); + mutex_lock(&con->mutex); +} + static int process_connect(struct ceph_connection *con) { + u64 sup_feat = CEPH_FEATURE_SUPPORTED; + u64 req_feat = CEPH_FEATURE_REQUIRED; + u64 server_feat = le64_to_cpu(con->in_reply.features); + dout("process_connect on %p tag %d\n", con, (int)con->in_tag); switch (con->in_reply.tag) { + case CEPH_MSGR_TAG_FEATURES: + pr_err("%s%lld %s feature set mismatch," + " my %llx < server's %llx, missing %llx\n", + ENTITY_NAME(con->peer_name), + pr_addr(&con->peer_addr.in_addr), + sup_feat, server_feat, server_feat & ~sup_feat); + con->error_msg = "missing required protocol features"; + fail_protocol(con); + return -1; + case CEPH_MSGR_TAG_BADPROTOVER: - dout("process_connect got BADPROTOVER my %d != their %d\n", - le32_to_cpu(con->out_connect.protocol_version), - le32_to_cpu(con->in_reply.protocol_version)); pr_err("%s%lld %s protocol version mismatch," " my %d != server's %d\n", ENTITY_NAME(con->peer_name), @@ -1096,13 +1119,7 @@ static int process_connect(struct ceph_connection *con) le32_to_cpu(con->out_connect.protocol_version), le32_to_cpu(con->in_reply.protocol_version)); con->error_msg = "protocol version mismatch"; - reset_connection(con); - set_bit(CLOSED, &con->state); /* in case there's queued work */ - - mutex_unlock(&con->mutex); - if (con->ops->bad_proto) - con->ops->bad_proto(con); - mutex_lock(&con->mutex); + fail_protocol(con); return -1; case CEPH_MSGR_TAG_BADAUTHORIZER: @@ -1173,6 +1190,16 @@ static int process_connect(struct ceph_connection *con) break; case CEPH_MSGR_TAG_READY: + if (req_feat & ~server_feat) { + pr_err("%s%lld %s protocol feature mismatch," + " my required %llx > server's %llx, need %llx\n", + ENTITY_NAME(con->peer_name), + pr_addr(&con->peer_addr.in_addr), + req_feat, server_feat, req_feat & ~server_feat); + con->error_msg = "missing required protocol features"; + fail_protocol(con); + return -1; + } clear_bit(CONNECTING, &con->state); con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); con->connect_seq++; diff --git a/fs/ceph/msgr.h b/fs/ceph/msgr.h index e46d8b806dea..be83f93182ee 100644 --- a/fs/ceph/msgr.h +++ b/fs/ceph/msgr.h @@ -21,7 +21,7 @@ * whenever the wire protocol changes. try to keep this string length * constant. */ -#define CEPH_BANNER "ceph v025" +#define CEPH_BANNER "ceph v026" #define CEPH_BANNER_MAX_LEN 30 @@ -100,12 +100,14 @@ struct ceph_entity_inst { #define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! 
*/ #define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */ #define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */ +#define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */ /* * connection negotiation */ struct ceph_msg_connect { + __le64 features; /* supported feature bits */ __le32 host_type; /* CEPH_ENTITY_TYPE_* */ __le32 global_seq; /* count connections initiated by this host */ __le32 connect_seq; /* count connections initiated in this session */ @@ -117,6 +119,7 @@ struct ceph_msg_connect { struct ceph_msg_connect_reply { __u8 tag; + __le64 features; /* feature bits for this session */ __le32 global_seq; __le32 connect_seq; __le32 protocol_version; -- cgit v1.2.3 From 58bb3b374b07a2a43315213f00a48a5ffd6d0915 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 23 Dec 2009 12:12:31 -0800 Subject: ceph: support ceph_pagelist for message payload The ceph_pagelist is a simple list of whole pages, strung together via their lru list_head. It facilitates encoding to a "buffer" of unknown size. Allow its use in place of the ceph_msg page vector. This will be used to fix the huge buffer preallocation woes of MDS reconnection. Signed-off-by: Sage Weil --- fs/ceph/Makefile | 2 +- fs/ceph/messenger.c | 24 ++++++++++++++++++++---- fs/ceph/messenger.h | 1 + fs/ceph/pagelist.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/pagelist.h | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 130 insertions(+), 5 deletions(-) create mode 100644 fs/ceph/pagelist.c create mode 100644 fs/ceph/pagelist.h diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile index 827629c85768..47caf2f1b75a 100644 --- a/fs/ceph/Makefile +++ b/fs/ceph/Makefile @@ -8,7 +8,7 @@ obj-$(CONFIG_CEPH_FS) += ceph.o ceph-objs := super.o inode.o dir.o file.o addr.o ioctl.o \ export.o caps.o snap.o xattr.o \ - messenger.o msgpool.o buffer.o \ + messenger.o msgpool.o buffer.o pagelist.o \ mds_client.o mdsmap.o \ mon_client.o \ osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \ diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 68052f664280..c1106e8360f0 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -13,6 +13,7 @@ #include "super.h" #include "messenger.h" #include "decode.h" +#include "pagelist.h" /* * Ceph uses the messenger to exchange ceph_msg messages with other @@ -728,6 +729,11 @@ static int write_partial_msg_pages(struct ceph_connection *con) page = msg->pages[con->out_msg_pos.page]; if (crc) kaddr = kmap(page); + } else if (msg->pagelist) { + page = list_first_entry(&msg->pagelist->head, + struct page, lru); + if (crc) + kaddr = kmap(page); } else { page = con->msgr->zero_page; if (crc) @@ -750,7 +756,7 @@ static int write_partial_msg_pages(struct ceph_connection *con) MSG_DONTWAIT | MSG_NOSIGNAL | MSG_MORE); - if (crc && msg->pages) + if (crc && (msg->pages || msg->pagelist)) kunmap(page); if (ret <= 0) @@ -762,6 +768,9 @@ static int write_partial_msg_pages(struct ceph_connection *con) con->out_msg_pos.page_pos = 0; con->out_msg_pos.page++; con->out_msg_pos.did_page_crc = 0; + if (msg->pagelist) + list_move_tail(&page->lru, + &msg->pagelist->head); } } @@ -1051,13 +1060,13 @@ static int process_banner(struct ceph_connection *con) &con->actual_peer_addr) && !(addr_is_blank(&con->actual_peer_addr.in_addr) && con->actual_peer_addr.nonce == con->peer_addr.nonce)) { - pr_err("wrong peer, want %s/%d, " - "got %s/%d, wtf\n", + pr_warning("wrong peer, want %s/%d, " + "got %s/%d\n", pr_addr(&con->peer_addr.in_addr), con->peer_addr.nonce, 
pr_addr(&con->actual_peer_addr.in_addr), con->actual_peer_addr.nonce); - con->error_msg = "protocol error, wrong peer"; + con->error_msg = "wrong peer at address"; return -1; } @@ -2096,6 +2105,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, /* data */ m->nr_pages = calc_pages_for(page_off, page_len); m->pages = pages; + m->pagelist = NULL; dout("ceph_msg_new %p page %d~%d -> %d\n", m, page_off, page_len, m->nr_pages); @@ -2181,6 +2191,12 @@ void ceph_msg_last_put(struct kref *kref) m->nr_pages = 0; m->pages = NULL; + if (m->pagelist) { + ceph_pagelist_release(m->pagelist); + kfree(m->pagelist); + m->pagelist = NULL; + } + if (m->pool) ceph_msgpool_put(m->pool, m); else diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index 7e2aab1d3ce2..a7b684145092 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -85,6 +85,7 @@ struct ceph_msg { struct ceph_buffer *middle; struct page **pages; /* data payload. NOT OWNER. */ unsigned nr_pages; /* size of page array */ + struct ceph_pagelist *pagelist; /* instead of pages */ struct list_head list_head; struct kref kref; bool front_is_vmalloc; diff --git a/fs/ceph/pagelist.c b/fs/ceph/pagelist.c new file mode 100644 index 000000000000..370e93695474 --- /dev/null +++ b/fs/ceph/pagelist.c @@ -0,0 +1,54 @@ + +#include +#include + +#include "pagelist.h" + +int ceph_pagelist_release(struct ceph_pagelist *pl) +{ + if (pl->mapped_tail) + kunmap(pl->mapped_tail); + while (!list_empty(&pl->head)) { + struct page *page = list_first_entry(&pl->head, struct page, + lru); + list_del(&page->lru); + __free_page(page); + } + return 0; +} + +static int ceph_pagelist_addpage(struct ceph_pagelist *pl) +{ + struct page *page = alloc_page(GFP_NOFS); + if (!page) + return -ENOMEM; + pl->room += PAGE_SIZE; + list_add_tail(&page->lru, &pl->head); + if (pl->mapped_tail) + kunmap(pl->mapped_tail); + pl->mapped_tail = kmap(page); + return 0; +} + +int ceph_pagelist_append(struct ceph_pagelist *pl, void *buf, size_t len) +{ + while (pl->room < len) { + size_t bit = pl->room; + int ret; + + memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), + buf, bit); + pl->length += bit; + pl->room -= bit; + buf += bit; + len -= bit; + ret = ceph_pagelist_addpage(pl); + if (ret) + return ret; + } + + memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len); + pl->length += len; + pl->room -= len; + return 0; +} diff --git a/fs/ceph/pagelist.h b/fs/ceph/pagelist.h new file mode 100644 index 000000000000..e8a4187e1087 --- /dev/null +++ b/fs/ceph/pagelist.h @@ -0,0 +1,54 @@ +#ifndef __FS_CEPH_PAGELIST_H +#define __FS_CEPH_PAGELIST_H + +#include + +struct ceph_pagelist { + struct list_head head; + void *mapped_tail; + size_t length; + size_t room; +}; + +static inline void ceph_pagelist_init(struct ceph_pagelist *pl) +{ + INIT_LIST_HEAD(&pl->head); + pl->mapped_tail = NULL; + pl->length = 0; + pl->room = 0; +} +extern int ceph_pagelist_release(struct ceph_pagelist *pl); + +extern int ceph_pagelist_append(struct ceph_pagelist *pl, void *d, size_t l); + +static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v) +{ + __le64 ev = cpu_to_le64(v); + return ceph_pagelist_append(pl, &ev, sizeof(ev)); +} +static inline int ceph_pagelist_encode_32(struct ceph_pagelist *pl, u32 v) +{ + __le32 ev = cpu_to_le32(v); + return ceph_pagelist_append(pl, &ev, sizeof(ev)); +} +static inline int ceph_pagelist_encode_16(struct ceph_pagelist *pl, u16 v) +{ + __le16 ev = cpu_to_le16(v); + return ceph_pagelist_append(pl, &ev, sizeof(ev)); +} +static inline int 
ceph_pagelist_encode_8(struct ceph_pagelist *pl, u8 v) +{ + return ceph_pagelist_append(pl, &v, 1); +} +static inline int ceph_pagelist_encode_string(struct ceph_pagelist *pl, + char *s, size_t len) +{ + int ret = ceph_pagelist_encode_32(pl, len); + if (ret) + return ret; + if (len) + return ceph_pagelist_append(pl, s, len); + return 0; +} + +#endif -- cgit v1.2.3 From 93cea5bebf91319095db866163a7e35c3e77d8f2 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 23 Dec 2009 12:21:51 -0800 Subject: ceph: use ceph_pagelist for mds reconnect message; change encoding (protocol change) Use the ceph_pagelist to encode the MDS reconnect message. We change the message encoding (protocol change!) at the same time to make our life easier (we don't know how many snaprealms we have when we start encoding). An empty message implies the session is closed/does not exist. Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.h | 2 +- fs/ceph/mds_client.c | 156 ++++++++++++++++++--------------------------------- 2 files changed, 57 insertions(+), 101 deletions(-) diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index db3fed33c4aa..d0f2557bb41b 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -39,7 +39,7 @@ #define CEPH_MDS_PROTOCOL 9 /* cluster internal */ #define CEPH_MON_PROTOCOL 5 /* cluster internal */ #define CEPH_OSDC_PROTOCOL 22 /* server/client */ -#define CEPH_MDSC_PROTOCOL 30 /* server/client */ +#define CEPH_MDSC_PROTOCOL 31 /* server/client */ #define CEPH_MONC_PROTOCOL 15 /* server/client */ diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index ec884e2845db..6e08f488a30f 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -9,6 +9,7 @@ #include "messenger.h" #include "decode.h" #include "auth.h" +#include "pagelist.h" /* * A cluster of MDS (metadata server) daemons is responsible for @@ -1971,20 +1972,12 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc, /* * Encode information about a cap for a reconnect with the MDS. 
*/ -struct encode_caps_data { - void **pp; - void *end; - int *num_caps; -}; - static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) { - struct ceph_mds_cap_reconnect *rec; + struct ceph_mds_cap_reconnect rec; struct ceph_inode_info *ci; - struct encode_caps_data *data = (struct encode_caps_data *)arg; - void *p = *(data->pp); - void *end = data->end; + struct ceph_pagelist *pagelist = arg; char *path; int pathlen, err; u64 pathbase; @@ -1995,8 +1988,9 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, dout(" adding %p ino %llx.%llx cap %p %lld %s\n", inode, ceph_vinop(inode), cap, cap->cap_id, ceph_cap_string(cap->issued)); - ceph_decode_need(&p, end, sizeof(u64), needmore); - ceph_encode_64(&p, ceph_ino(inode)); + err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode)); + if (err) + return err; dentry = d_find_alias(inode); if (dentry) { @@ -2009,33 +2003,29 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, path = NULL; pathlen = 0; } - ceph_decode_need(&p, end, pathlen+4, needmore); - ceph_encode_string(&p, end, path, pathlen); + err = ceph_pagelist_encode_string(pagelist, path, pathlen); + if (err) + goto out; - ceph_decode_need(&p, end, sizeof(*rec), needmore); - rec = p; - p += sizeof(*rec); - BUG_ON(p > end); spin_lock(&inode->i_lock); cap->seq = 0; /* reset cap seq */ cap->issue_seq = 0; /* and issue_seq */ - rec->cap_id = cpu_to_le64(cap->cap_id); - rec->pathbase = cpu_to_le64(pathbase); - rec->wanted = cpu_to_le32(__ceph_caps_wanted(ci)); - rec->issued = cpu_to_le32(cap->issued); - rec->size = cpu_to_le64(inode->i_size); - ceph_encode_timespec(&rec->mtime, &inode->i_mtime); - ceph_encode_timespec(&rec->atime, &inode->i_atime); - rec->snaprealm = cpu_to_le64(ci->i_snap_realm->ino); + rec.cap_id = cpu_to_le64(cap->cap_id); + rec.pathbase = cpu_to_le64(pathbase); + rec.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); + rec.issued = cpu_to_le32(cap->issued); + rec.size = cpu_to_le64(inode->i_size); + ceph_encode_timespec(&rec.mtime, &inode->i_mtime); + ceph_encode_timespec(&rec.atime, &inode->i_atime); + rec.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); spin_unlock(&inode->i_lock); + err = ceph_pagelist_append(pagelist, &rec, sizeof(rec)); + +out: kfree(path); dput(dentry); - (*data->num_caps)++; - *(data->pp) = p; - return 0; -needmore: - return -ENOSPC; + return err; } @@ -2053,19 +2043,26 @@ needmore: */ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds) { - struct ceph_mds_session *session; + struct ceph_mds_session *session = NULL; struct ceph_msg *reply; - int newlen, len = 4 + 1; - void *p, *end; int err; - int num_caps, num_realms = 0; int got; u64 next_snap_ino = 0; - __le32 *pnum_caps, *pnum_realms; - struct encode_caps_data iter_args; + struct ceph_pagelist *pagelist; pr_info("reconnect to recovering mds%d\n", mds); + pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS); + if (!pagelist) + goto fail_nopagelist; + ceph_pagelist_init(pagelist); + + reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, 0, 0, NULL); + if (IS_ERR(reply)) { + err = PTR_ERR(reply); + goto fail_nomsg; + } + /* find session */ session = __ceph_lookup_mds_session(mdsc, mds); mutex_unlock(&mdsc->mutex); /* drop lock for duration */ @@ -2081,12 +2078,6 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds) /* replay unsafe requests */ replay_unsafe_requests(mdsc, session); - - /* estimate needed space */ - len += session->s_nr_caps * - (100+sizeof(struct ceph_mds_cap_reconnect)); - pr_info("estimating i 
need %d bytes for %d caps\n", - len, session->s_nr_caps); } else { dout("no session for mds%d, will send short reconnect\n", mds); @@ -2094,41 +2085,18 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds) down_read(&mdsc->snap_rwsem); -retry: - /* build reply */ - reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, len, 0, 0, NULL); - if (IS_ERR(reply)) { - err = PTR_ERR(reply); - pr_err("send_mds_reconnect ENOMEM on %d for mds%d\n", - len, mds); - goto out; - } - p = reply->front.iov_base; - end = p + len; - - if (!session) { - ceph_encode_8(&p, 1); /* session was closed */ - ceph_encode_32(&p, 0); + if (!session) goto send; - } dout("session %p state %s\n", session, session_state_name(session->s_state)); /* traverse this session's caps */ - ceph_encode_8(&p, 0); - pnum_caps = p; - ceph_encode_32(&p, session->s_nr_caps); - num_caps = 0; - - iter_args.pp = &p; - iter_args.end = end; - iter_args.num_caps = &num_caps; - err = iterate_session_caps(session, encode_caps_cb, &iter_args); - if (err == -ENOSPC) - goto needmore; + err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps); + if (err) + goto fail; + err = iterate_session_caps(session, encode_caps_cb, pagelist); if (err < 0) goto out; - *pnum_caps = cpu_to_le32(num_caps); /* * snaprealms. we provide mds with the ino, seq (version), and @@ -2136,14 +2104,9 @@ retry: * it will tell us. */ next_snap_ino = 0; - /* save some space for the snaprealm count */ - pnum_realms = p; - ceph_decode_need(&p, end, sizeof(*pnum_realms), needmore); - p += sizeof(*pnum_realms); - num_realms = 0; while (1) { struct ceph_snap_realm *realm; - struct ceph_mds_snaprealm_reconnect *sr_rec; + struct ceph_mds_snaprealm_reconnect sr_rec; got = radix_tree_gang_lookup(&mdsc->snap_realms, (void **)&realm, next_snap_ino, 1); if (!got) @@ -2151,22 +2114,19 @@ retry: dout(" adding snap realm %llx seq %lld parent %llx\n", realm->ino, realm->seq, realm->parent_ino); - ceph_decode_need(&p, end, sizeof(*sr_rec), needmore); - sr_rec = p; - sr_rec->ino = cpu_to_le64(realm->ino); - sr_rec->seq = cpu_to_le64(realm->seq); - sr_rec->parent = cpu_to_le64(realm->parent_ino); - p += sizeof(*sr_rec); - num_realms++; + sr_rec.ino = cpu_to_le64(realm->ino); + sr_rec.seq = cpu_to_le64(realm->seq); + sr_rec.parent = cpu_to_le64(realm->parent_ino); + err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec)); + if (err) + goto fail; next_snap_ino = realm->ino + 1; } - *pnum_realms = cpu_to_le32(num_realms); send: - reply->front.iov_len = p - reply->front.iov_base; - reply->hdr.front_len = cpu_to_le32(reply->front.iov_len); - dout("final len was %u (guessed %d)\n", - (unsigned)reply->front.iov_len, len); + reply->pagelist = pagelist; + reply->hdr.data_len = cpu_to_le32(pagelist->length); + reply->nr_pages = calc_pages_for(0, pagelist->length); ceph_con_send(&session->s_con, reply); if (session) { @@ -2183,18 +2143,14 @@ out: mutex_lock(&mdsc->mutex); return; -needmore: - /* - * we need a larger buffer. this doesn't very accurately - * factor in snap realms, but it's safe. 
- */ - num_caps += num_realms; - newlen = len * ((100 * (session->s_nr_caps+3)) / (num_caps + 1)) / 100; - pr_info("i guessed %d, and did %d of %d caps, retrying with %d\n", - len, num_caps, session->s_nr_caps, newlen); - len = newlen; +fail: ceph_msg_put(reply); - goto retry; +fail_nomsg: + ceph_pagelist_release(pagelist); + kfree(pagelist); +fail_nopagelist: + pr_err("ENOMEM preparing reconnect for mds%d\n", mds); + goto out; } -- cgit v1.2.3 From 6a4ef48103a78a46b80e07fcd8ac4edda0c7128f Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Thu, 31 Dec 2009 12:04:58 -0800 Subject: ceph: fix copy_user_to_page_vector() The function was broken in the case where there was more than one page involved, broke the ceph sync_write case. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/file.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/ceph/file.c b/fs/ceph/file.c index fc8aff4767d3..2d88c805a56c 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -350,10 +350,10 @@ static int copy_user_to_page_vector(struct page **pages, return -EFAULT; data += l - bad; left -= l - bad; - if (po) { - po += l - bad; - if (po == PAGE_CACHE_SIZE) - po = 0; + po += l - bad; + if (po == PAGE_CACHE_SIZE) { + po = 0; + i++; } } return len; -- cgit v1.2.3 From 145434bee45bd353f9a93e9b411f7aa7cc677c08 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Mon, 11 Jan 2010 13:46:31 +0000 Subject: uwb: wlp: refactor wlp_get_() macros Refactor the wlp_get_() macros to call a common function. This save over 4k of space and remove a spurious uninitialized variable warning with some versions of gcc. Signed-off-by: David Vrabel --- drivers/uwb/wlp/messages.c | 106 ++++++++++++++++++++++++++------------------- 1 file changed, 61 insertions(+), 45 deletions(-) diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c index aa42fcee4c4f..75164866c2d8 100644 --- a/drivers/uwb/wlp/messages.c +++ b/drivers/uwb/wlp/messages.c @@ -259,6 +259,63 @@ out: } +static ssize_t wlp_get_attribute(struct wlp *wlp, u16 type_code, + struct wlp_attr_hdr *attr_hdr, void *value, ssize_t value_len, + ssize_t buflen) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + ssize_t attr_len = sizeof(*attr_hdr) + value_len; + if (buflen < 0) + return -EINVAL; + if (buflen < attr_len) { + dev_err(dev, "WLP: Not enough space in buffer to parse" + " attribute field. Need %d, received %zu\n", + (int)attr_len, buflen); + return -EIO; + } + if (wlp_check_attr_hdr(wlp, attr_hdr, type_code, value_len) < 0) { + dev_err(dev, "WLP: Header verification failed. \n"); + return -EINVAL; + } + memcpy(value, (void *)attr_hdr + sizeof(*attr_hdr), value_len); + return attr_len; +} + +static ssize_t wlp_vget_attribute(struct wlp *wlp, u16 type_code, + struct wlp_attr_hdr *attr_hdr, void *value, ssize_t max_value_len, + ssize_t buflen) +{ + struct device *dev = &wlp->rc->uwb_dev.dev; + size_t len; + if (buflen < 0) + return -EINVAL; + if (buflen < sizeof(*attr_hdr)) { + dev_err(dev, "WLP: Not enough space in buffer to parse" + " header.\n"); + return -EIO; + } + if (le16_to_cpu(attr_hdr->type) != type_code) { + dev_err(dev, "WLP: Unexpected attribute type. Got %u, " + "expected %u.\n", le16_to_cpu(attr_hdr->type), + type_code); + return -EINVAL; + } + len = le16_to_cpu(attr_hdr->length); + if (len > max_value_len) { + dev_err(dev, "WLP: Attribute larger than maximum " + "allowed. 
Received %zu, max is %d.\n", len, + (int)max_value_len); + return -EFBIG; + } + if (buflen < sizeof(*attr_hdr) + len) { + dev_err(dev, "WLP: Not enough space in buffer to parse " + "variable data.\n"); + return -EIO; + } + memcpy(value, (void *)attr_hdr + sizeof(*attr_hdr), len); + return sizeof(*attr_hdr) + len; +} + /** * Get value of attribute from fixed size attribute field. * @@ -274,22 +331,8 @@ out: ssize_t wlp_get_##name(struct wlp *wlp, struct wlp_attr_##name *attr, \ type *value, ssize_t buflen) \ { \ - struct device *dev = &wlp->rc->uwb_dev.dev; \ - if (buflen < 0) \ - return -EINVAL; \ - if (buflen < sizeof(*attr)) { \ - dev_err(dev, "WLP: Not enough space in buffer to parse" \ - " attribute field. Need %d, received %zu\n", \ - (int)sizeof(*attr), buflen); \ - return -EIO; \ - } \ - if (wlp_check_attr_hdr(wlp, &attr->hdr, type_code, \ - sizeof(attr->name)) < 0) { \ - dev_err(dev, "WLP: Header verification failed. \n"); \ - return -EINVAL; \ - } \ - *value = attr->name; \ - return sizeof(*attr); \ + return wlp_get_attribute(wlp, (type_code), &attr->hdr, \ + value, sizeof(*value), buflen); \ } #define wlp_get_sparse(type, type_code, name) \ @@ -313,35 +356,8 @@ static ssize_t wlp_get_##name(struct wlp *wlp, \ struct wlp_attr_##name *attr, \ type_val *value, ssize_t buflen) \ { \ - struct device *dev = &wlp->rc->uwb_dev.dev; \ - size_t len; \ - if (buflen < 0) \ - return -EINVAL; \ - if (buflen < sizeof(*attr)) { \ - dev_err(dev, "WLP: Not enough space in buffer to parse" \ - " header.\n"); \ - return -EIO; \ - } \ - if (le16_to_cpu(attr->hdr.type) != type_code) { \ - dev_err(dev, "WLP: Unexpected attribute type. Got %u, " \ - "expected %u.\n", le16_to_cpu(attr->hdr.type), \ - type_code); \ - return -EINVAL; \ - } \ - len = le16_to_cpu(attr->hdr.length); \ - if (len > max) { \ - dev_err(dev, "WLP: Attribute larger than maximum " \ - "allowed. Received %zu, max is %d.\n", len, \ - (int)max); \ - return -EFBIG; \ - } \ - if (buflen < sizeof(*attr) + len) { \ - dev_err(dev, "WLP: Not enough space in buffer to parse "\ - "variable data.\n"); \ - return -EIO; \ - } \ - memcpy(value, (void *) attr + sizeof(*attr), len); \ - return sizeof(*attr) + len; \ + return wlp_vget_attribute(wlp, (type_code), &attr->hdr, \ + value, (max), buflen); \ } wlp_get(u8, WLP_ATTR_WLP_VER, version) -- cgit v1.2.3 From 34446d05dd255b34518c76d2b8760161e63fe0c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20N=C3=A9meth?= Date: Tue, 12 Jan 2010 08:49:14 +0100 Subject: uwb: make USB device id table constant MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The id_table field of the struct usb_device_id is constant in so it is worth to make the initialization data also constant. 
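For reference, a constified id table then takes the usual form; a minimal, hypothetical example (made-up vendor/product ids, not from this driver):

    static const struct usb_device_id example_id_table[] = {
            { USB_DEVICE(0x1234, 0x5678) },     /* hypothetical VID/PID */
            { }                                 /* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, example_id_table);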
Signed-off-by: Márton Németh Signed-off-by: David Vrabel --- drivers/uwb/hwa-rc.c | 2 +- drivers/uwb/i1480/dfu/usb.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index e7eeb63fab23..b409c228f254 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c @@ -891,7 +891,7 @@ static int hwarc_post_reset(struct usb_interface *iface) } /** USB device ID's that we handle */ -static struct usb_device_id hwarc_id_table[] = { +static const struct usb_device_id hwarc_id_table[] = { /* D-Link DUB-1210 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3d02, 0xe0, 0x01, 0x02), .driver_info = WUSB_QUIRK_WHCI_CMD_EVT }, diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c index 0bb665a0c024..08f9a7b95c42 100644 --- a/drivers/uwb/i1480/dfu/usb.c +++ b/drivers/uwb/i1480/dfu/usb.c @@ -430,7 +430,7 @@ error: /** USB device ID's that we handle */ -static struct usb_device_id i1480_usb_id_table[] = { +static const struct usb_device_id i1480_usb_id_table[] = { i1480_USB_DEV(0x8086, 0xdf3b), i1480_USB_DEV(0x15a9, 0x0005), i1480_USB_DEV(0x07d1, 0x3802), -- cgit v1.2.3 From 35fb2a816a06ded2a3ff83d896c34b83c8e1d556 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 13 Jan 2010 23:41:50 +0000 Subject: uwb: declare MODULE_FIRMWARE() in i1480 DFU driver Signed-off-by: Ben Hutchings Signed-off-by: David Vrabel --- drivers/uwb/i1480/dfu/usb.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c index 08f9a7b95c42..a6a93755627e 100644 --- a/drivers/uwb/i1480/dfu/usb.c +++ b/drivers/uwb/i1480/dfu/usb.c @@ -413,6 +413,10 @@ error: return result; } +MODULE_FIRMWARE("i1480-pre-phy-0.0.bin"); +MODULE_FIRMWARE("i1480-usb-0.0.bin"); +MODULE_FIRMWARE("i1480-phy-0.0.bin"); + #define i1480_USB_DEV(v, p) \ { \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE \ -- cgit v1.2.3 From 4baa75ef0ed29adae03fcbbaa9aca1511a5a8cc9 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Thu, 7 Jan 2010 15:36:32 -0800 Subject: ceph: change dentry offset and position after splice_dentry This fixes a bug, where we had the parent list have dentries with offsets that are not monotonically increasing, which caused the ceph dcache_readdir to skip entries. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/inode.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 8774b2811597..518beb628f09 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -816,6 +816,33 @@ out: return dn; } +/* + * Set dentry's directory position based on the current dir's max, and + * order it in d_subdirs, so that dcache_readdir behaves. + */ +static void ceph_set_dentry_offset(struct dentry *dn) +{ + struct dentry *dir = dn->d_parent; + struct inode *inode = dn->d_parent->d_inode; + struct ceph_dentry_info *di; + + BUG_ON(!inode); + + di = ceph_dentry(dn); + + spin_lock(&inode->i_lock); + di->offset = ceph_inode(inode)->i_max_offset++; + spin_unlock(&inode->i_lock); + + spin_lock(&dcache_lock); + spin_lock(&dn->d_lock); + list_move_tail(&dir->d_subdirs, &dn->d_u.d_child); + dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset, + dn->d_u.d_child.prev, dn->d_u.d_child.next); + spin_unlock(&dn->d_lock); + spin_unlock(&dcache_lock); +} + /* * Incorporate results into the local cache. 
This is either just * one inode, or a directory, dentry, and possibly linked-to inode (e.g., @@ -987,6 +1014,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, goto done; } req->r_dentry = dn; /* may have spliced */ + ceph_set_dentry_offset(dn); igrab(in); } else if (ceph_ino(in) == vino.ino && ceph_snap(in) == vino.snap) { @@ -1029,6 +1057,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, err = PTR_ERR(dn); goto done; } + ceph_set_dentry_offset(dn); req->r_dentry = dn; /* may have spliced */ igrab(in); rinfo->head->is_dentry = 1; /* fool notrace handlers */ -- cgit v1.2.3 From 103e2d3ae57d38d18aaac1b327266c1407499ac1 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 7 Jan 2010 16:12:36 -0800 Subject: ceph: remove unused erank field The ceph_entity_addr erank field is obsolete; remove it. Get rid of trivial addr comparison helpers while we're at it. Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 19 ++++++++----------- fs/ceph/mon_client.c | 3 +-- fs/ceph/msgr.h | 18 ++---------------- fs/ceph/osd_client.c | 7 ++++--- 4 files changed, 15 insertions(+), 32 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index c1106e8360f0..1360708d7505 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -1056,16 +1056,15 @@ static int process_banner(struct ceph_connection *con) * end may not yet know their ip address, so if it's 0.0.0.0, give * them the benefit of the doubt. */ - if (!ceph_entity_addr_is_local(&con->peer_addr, - &con->actual_peer_addr) && + if (memcmp(&con->peer_addr, &con->actual_peer_addr, + sizeof(con->peer_addr)) != 0 && !(addr_is_blank(&con->actual_peer_addr.in_addr) && con->actual_peer_addr.nonce == con->peer_addr.nonce)) { - pr_warning("wrong peer, want %s/%d, " - "got %s/%d\n", - pr_addr(&con->peer_addr.in_addr), - con->peer_addr.nonce, - pr_addr(&con->actual_peer_addr.in_addr), - con->actual_peer_addr.nonce); + pr_warning("wrong peer, want %s/%lld, got %s/%lld\n", + pr_addr(&con->peer_addr.in_addr), + le64_to_cpu(con->peer_addr.nonce), + pr_addr(&con->actual_peer_addr.in_addr), + le64_to_cpu(con->actual_peer_addr.nonce)); con->error_msg = "wrong peer at address"; return -1; } @@ -1934,8 +1933,7 @@ struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr) msgr->inst.addr = *myaddr; /* select a random nonce */ - get_random_bytes(&msgr->inst.addr.nonce, - sizeof(msgr->inst.addr.nonce)); + get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); encode_my_addr(msgr); dout("messenger_create %p\n", msgr); @@ -1966,7 +1964,6 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) msg->hdr.src.name = con->msgr->inst.name; msg->hdr.src.addr = con->msgr->my_enc_addr; msg->hdr.orig_src = msg->hdr.src; - msg->hdr.dst_erank = con->peer_addr.erank; /* queue */ mutex_lock(&con->mutex); diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index bb94006fc686..223e8bc207e3 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -88,7 +88,7 @@ int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr) int i; for (i = 0; i < m->num_mon; i++) - if (ceph_entity_addr_equal(addr, &m->mon_inst[i].addr)) + if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0) return 1; return 0; } @@ -503,7 +503,6 @@ static int build_initial_monmap(struct ceph_mon_client *monc) return -ENOMEM; for (i = 0; i < num_mon; i++) { monc->monmap->mon_inst[i].addr = mon_addr[i]; - monc->monmap->mon_inst[i].addr.erank = 0; 
monc->monmap->mon_inst[i].addr.nonce = 0; monc->monmap->mon_inst[i].name.type = CEPH_ENTITY_TYPE_MON; diff --git a/fs/ceph/msgr.h b/fs/ceph/msgr.h index be83f93182ee..40b6189aa9e3 100644 --- a/fs/ceph/msgr.h +++ b/fs/ceph/msgr.h @@ -61,24 +61,10 @@ extern const char *ceph_entity_type_name(int type); * entity_addr -- network address */ struct ceph_entity_addr { - __le32 erank; /* entity's rank in process */ - __le32 nonce; /* unique id for process (e.g. pid) */ + __le64 nonce; /* unique id for process (e.g. pid) */ struct sockaddr_storage in_addr; } __attribute__ ((packed)); -static inline bool ceph_entity_addr_is_local(const struct ceph_entity_addr *a, - const struct ceph_entity_addr *b) -{ - return a->nonce == b->nonce && - memcmp(&a->in_addr, &b->in_addr, sizeof(a->in_addr)) == 0; -} - -static inline bool ceph_entity_addr_equal(const struct ceph_entity_addr *a, - const struct ceph_entity_addr *b) -{ - return memcmp(a, b, sizeof(*a)) == 0; -} - struct ceph_entity_inst { struct ceph_entity_name name; struct ceph_entity_addr addr; @@ -147,7 +133,7 @@ struct ceph_msg_header { receiver: mask against ~PAGE_MASK */ struct ceph_entity_inst src, orig_src; - __le32 dst_erank; + __le32 reserved; __le32 crc; /* header crc32c */ } __attribute__ ((packed)); diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index a0aac436d5d4..80b868f7a0fc 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -821,9 +821,10 @@ static void kick_requests(struct ceph_osd_client *osdc, n = rb_next(p); if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) || - !ceph_entity_addr_equal(&osd->o_con.peer_addr, - ceph_osd_addr(osdc->osdmap, - osd->o_osd))) + memcmp(&osd->o_con.peer_addr, + ceph_osd_addr(osdc->osdmap, + osd->o_osd), + sizeof(struct ceph_entity_addr)) != 0) reset_osd(osdc, osd); } } -- cgit v1.2.3 From 7740a42f816790583bd8a9079337772d511af3a3 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 8 Jan 2010 15:58:25 -0800 Subject: ceph: display pgid in debugfs osd request dump Signed-off-by: Sage Weil --- fs/ceph/debugfs.c | 6 ++++-- fs/ceph/osd_client.c | 2 ++ fs/ceph/osd_client.h | 1 + 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index 22d3b47fb1be..fba44b2a6086 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -231,8 +231,10 @@ static int osdc_show(struct seq_file *s, void *pp) req = rb_entry(p, struct ceph_osd_request, r_node); - seq_printf(s, "%lld\tosd%d\t", req->r_tid, - req->r_osd ? req->r_osd->o_osd : -1); + seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid, + req->r_osd ? 
req->r_osd->o_osd : -1, + le32_to_cpu(req->r_pgid.pool), + le16_to_cpu(req->r_pgid.ps)); head = req->r_request->front.iov_base; op = (void *)(head + 1); diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 80b868f7a0fc..8417e21a3cb2 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -538,6 +538,8 @@ static int __map_osds(struct ceph_osd_client *osdc, if (err) return err; pgid = reqhead->layout.ol_pgid; + req->r_pgid = pgid; + o = ceph_calc_pg_primary(osdc->osdmap, pgid); if ((req->r_osd && req->r_osd->o_osd == o && diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h index 8fef71cc4457..4162c6810a8f 100644 --- a/fs/ceph/osd_client.h +++ b/fs/ceph/osd_client.h @@ -42,6 +42,7 @@ struct ceph_osd_request { struct rb_node r_node; struct list_head r_osd_item; struct ceph_osd *r_osd; + struct ceph_pg r_pgid; struct ceph_connection *r_con_filling_pages; -- cgit v1.2.3 From ec7384ec23dc5a9ea8733e90438e16b6066bfe1b Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Wed, 20 Jan 2010 15:16:41 -0800 Subject: ceph: remove duplicate variable initialization The variable client is initialized twice to the same (side effect-free) expression. Drop one initialization. A simplified version of the semantic match that finds this problem is: (http://coccinelle.lip6.fr/) // @forall@ idexpression *x; identifier f!=ERR_PTR; @@ x = f(...) ... when != x ( x = f(...,<+...x...+>,...) | * x = f(...) ) // Signed-off-by: Julia Lawall Signed-off-by: Sage Weil --- fs/ceph/addr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index a6850a14038e..a3bd9deb555c 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -596,7 +596,7 @@ static int ceph_writepages_start(struct address_space *mapping, struct inode *inode = mapping->host; struct backing_dev_info *bdi = mapping->backing_dev_info; struct ceph_inode_info *ci = ceph_inode(inode); - struct ceph_client *client = ceph_inode_to_client(inode); + struct ceph_client *client; pgoff_t index, start, end; int range_whole = 0; int should_loop = 1; -- cgit v1.2.3 From 3ea25f9441fc0951ada649105f2c57a59536b539 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 25 Jan 2010 11:18:17 -0800 Subject: ceph: mark MDS CREATE as a write op CEPH_MDS_OP_CREATE was not correctly marked as a write operation. Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index d0f2557bb41b..d8923fed8c2e 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -39,7 +39,7 @@ #define CEPH_MDS_PROTOCOL 9 /* cluster internal */ #define CEPH_MON_PROTOCOL 5 /* cluster internal */ #define CEPH_OSDC_PROTOCOL 22 /* server/client */ -#define CEPH_MDSC_PROTOCOL 31 /* server/client */ +#define CEPH_MDSC_PROTOCOL 32 /* server/client */ #define CEPH_MONC_PROTOCOL 15 /* server/client */ @@ -271,7 +271,7 @@ enum { CEPH_MDS_OP_RMDIR = 0x01221, CEPH_MDS_OP_SYMLINK = 0x01222, - CEPH_MDS_OP_CREATE = 0x00301, + CEPH_MDS_OP_CREATE = 0x01301, CEPH_MDS_OP_OPEN = 0x00302, CEPH_MDS_OP_READDIR = 0x00305, -- cgit v1.2.3 From 5b1daecd59f95eb24dc629407ed80369c9929520 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 25 Jan 2010 11:33:08 -0800 Subject: ceph: properly handle aborted mds requests Previously, if the MDS request was interrupted, we would unregister the request and ignore any reply. This could cause the caps or other cache state to become out of sync. 
(For instance, aborting dbench and doing rm -r on clients would complain about a non-empty directory because the client didn't realize it's aborted file create request completed.) Even we don't unregister, we still can't process the reply normally because we are no longer holding the caller's locks (like the dir i_mutex). So, mark aborted operations with r_aborted, and in the reply handler, be sure to process all the caps. Do not process the namespace changes, though, since we no longer will hold the dir i_mutex. The dentry lease state can also be ignored as it's more forgiving. Signed-off-by: Sage Weil --- fs/ceph/inode.c | 16 ++++++++++------ fs/ceph/mds_client.c | 28 +++++++++++++++++++++++----- fs/ceph/mds_client.h | 1 + 3 files changed, 34 insertions(+), 11 deletions(-) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 518beb628f09..71e107fb4dbc 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -915,6 +915,16 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, } if (rinfo->head->is_dentry) { + struct inode *dir = req->r_locked_dir; + + err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag, + session, req->r_request_started, -1, + &req->r_caps_reservation); + if (err < 0) + return err; + } + + if (rinfo->head->is_dentry && !req->r_aborted) { /* * lookup link rename : null -> possibly existing inode * mknod symlink mkdir : null -> new inode @@ -932,12 +942,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, BUG_ON(ceph_snap(dir) != le64_to_cpu(rinfo->diri.in->snapid)); - err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag, - session, req->r_request_started, -1, - &req->r_caps_reservation); - if (err < 0) - return err; - /* do we have a lease on the whole dir? */ have_dir_cap = (le32_to_cpu(rinfo->diri.in->cap.caps) & diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 6e08f488a30f..623c67cd484b 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1624,11 +1624,29 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, err = PTR_ERR(req->r_reply); req->r_reply = NULL; - /* clean up */ - __unregister_request(mdsc, req); - if (!list_empty(&req->r_unsafe_item)) - list_del_init(&req->r_unsafe_item); - complete(&req->r_safe_completion); + if (err == -ERESTARTSYS) { + /* aborted */ + req->r_aborted = true; + + if (req->r_locked_dir && + (req->r_op & CEPH_MDS_OP_WRITE)) { + struct ceph_inode_info *ci = + ceph_inode(req->r_locked_dir); + + dout("aborted, clearing I_COMPLETE on %p\n", + req->r_locked_dir); + spin_lock(&req->r_locked_dir->i_lock); + ci->i_ceph_flags &= ~CEPH_I_COMPLETE; + ci->i_release_count++; + spin_unlock(&req->r_locked_dir->i_lock); + } + } else { + /* clean up this request */ + __unregister_request(mdsc, req); + if (!list_empty(&req->r_unsafe_item)) + list_del_init(&req->r_unsafe_item); + complete(&req->r_safe_completion); + } } else if (req->r_err) { err = req->r_err; } else { diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index b1c2025227c5..ee71495e27c4 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -188,6 +188,7 @@ struct ceph_mds_request { struct ceph_msg *r_reply; struct ceph_mds_reply_info_parsed r_reply_info; int r_err; + bool r_aborted; unsigned long r_timeout; /* optional. 
jiffies */ unsigned long r_started; /* start time to measure timeout against */ -- cgit v1.2.3 From 2450418c47b7998ad55a73f23707b1e21c371eef Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Fri, 8 Jan 2010 13:58:34 -0800 Subject: ceph: allocate middle of message before stating to read Both front and middle parts of the message are now being allocated at the ceph_alloc_msg(). Signed-off-by: Yehuda Sadeh --- fs/ceph/mds_client.c | 2 - fs/ceph/messenger.c | 142 +++++++++++++++++++++++++++++---------------------- fs/ceph/messenger.h | 9 +--- fs/ceph/mon_client.c | 25 ++++++--- fs/ceph/osd_client.c | 17 ++++-- 5 files changed, 115 insertions(+), 80 deletions(-) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 623c67cd484b..93998a0678c4 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2953,8 +2953,6 @@ const static struct ceph_connection_operations mds_con_ops = { .get_authorizer = get_authorizer, .verify_authorizer_reply = verify_authorizer_reply, .peer_reset = peer_reset, - .alloc_msg = ceph_alloc_msg, - .alloc_middle = ceph_alloc_middle, }; diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 1360708d7505..25de15c006b1 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -1279,8 +1279,34 @@ static void process_ack(struct ceph_connection *con) +static int read_partial_message_section(struct ceph_connection *con, + struct kvec *section, unsigned int sec_len, + u32 *crc) +{ + int left; + int ret; + + BUG_ON(!section); + + while (section->iov_len < sec_len) { + BUG_ON(section->iov_base == NULL); + left = sec_len - section->iov_len; + ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base + + section->iov_len, left); + if (ret <= 0) + return ret; + section->iov_len += ret; + if (section->iov_len == sec_len) + *crc = crc32c(0, section->iov_base, + section->iov_len); + } + return 1; +} +static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, + struct ceph_msg_header *hdr, + int *skip); /* * read (part of) a message. 
*/ @@ -1292,6 +1318,7 @@ static int read_partial_message(struct ceph_connection *con) int to, want, left; unsigned front_len, middle_len, data_len, data_off; int datacrc = con->msgr->nocrc; + int skip; dout("read_partial_message con %p msg %p\n", con, m); @@ -1315,7 +1342,6 @@ static int read_partial_message(struct ceph_connection *con) } } } - front_len = le32_to_cpu(con->in_hdr.front_len); if (front_len > CEPH_MSG_MAX_FRONT_LEN) return -EIO; @@ -1330,8 +1356,8 @@ static int read_partial_message(struct ceph_connection *con) if (!con->in_msg) { dout("got hdr type %d front %d data %d\n", con->in_hdr.type, con->in_hdr.front_len, con->in_hdr.data_len); - con->in_msg = con->ops->alloc_msg(con, &con->in_hdr); - if (!con->in_msg) { + con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip); + if (skip) { /* skip this message */ pr_err("alloc_msg returned NULL, skipping message\n"); con->in_base_pos = -front_len - middle_len - data_len - @@ -1342,56 +1368,28 @@ static int read_partial_message(struct ceph_connection *con) if (IS_ERR(con->in_msg)) { ret = PTR_ERR(con->in_msg); con->in_msg = NULL; - con->error_msg = "out of memory for incoming message"; + con->error_msg = "error allocating memory for incoming message"; return ret; } m = con->in_msg; m->front.iov_len = 0; /* haven't read it yet */ + if (m->middle) + m->middle->vec.iov_len = 0; memcpy(&m->hdr, &con->in_hdr, sizeof(con->in_hdr)); } /* front */ - while (m->front.iov_len < front_len) { - BUG_ON(m->front.iov_base == NULL); - left = front_len - m->front.iov_len; - ret = ceph_tcp_recvmsg(con->sock, (char *)m->front.iov_base + - m->front.iov_len, left); - if (ret <= 0) - return ret; - m->front.iov_len += ret; - if (m->front.iov_len == front_len) - con->in_front_crc = crc32c(0, m->front.iov_base, - m->front.iov_len); - } + ret = read_partial_message_section(con, &m->front, front_len, + &con->in_front_crc); + if (ret <= 0) + return ret; /* middle */ - while (middle_len > 0 && (!m->middle || - m->middle->vec.iov_len < middle_len)) { - if (m->middle == NULL) { - ret = -EOPNOTSUPP; - if (con->ops->alloc_middle) - ret = con->ops->alloc_middle(con, m); - if (ret < 0) { - pr_err("alloc_middle fail skipping payload\n"); - con->in_base_pos = -middle_len - data_len - - sizeof(m->footer); - ceph_msg_put(con->in_msg); - con->in_msg = NULL; - con->in_tag = CEPH_MSGR_TAG_READY; - return 0; - } - m->middle->vec.iov_len = 0; - } - left = middle_len - m->middle->vec.iov_len; - ret = ceph_tcp_recvmsg(con->sock, - (char *)m->middle->vec.iov_base + - m->middle->vec.iov_len, left); + if (m->middle) { + ret = read_partial_message_section(con, &m->middle->vec, middle_len, + &con->in_middle_crc); if (ret <= 0) return ret; - m->middle->vec.iov_len += ret; - if (m->middle->vec.iov_len == middle_len) - con->in_middle_crc = crc32c(0, m->middle->vec.iov_base, - m->middle->vec.iov_len); } /* (page) data */ @@ -2115,24 +2113,6 @@ out: return ERR_PTR(-ENOMEM); } -/* - * Generic message allocator, for incoming messages. - */ -struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, - struct ceph_msg_header *hdr) -{ - int type = le16_to_cpu(hdr->type); - int front_len = le32_to_cpu(hdr->front_len); - struct ceph_msg *msg = ceph_msg_new(type, front_len, 0, 0, NULL); - - if (!msg) { - pr_err("unable to allocate msg type %d len %d\n", - type, front_len); - return ERR_PTR(-ENOMEM); - } - return msg; -} - /* * Allocate "middle" portion of a message, if it is needed and wasn't * allocated by alloc_msg. 
This allows us to read a small fixed-size @@ -2140,7 +2120,7 @@ struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, * propagate the error to the caller based on info in the front) when * the middle is too large. */ -int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) +static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) { int type = le16_to_cpu(msg->hdr.type); int middle_len = le32_to_cpu(msg->hdr.middle_len); @@ -2156,6 +2136,48 @@ int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) return 0; } +/* + * Generic message allocator, for incoming messages. + */ +static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, + struct ceph_msg_header *hdr, + int *skip) +{ + int type = le16_to_cpu(hdr->type); + int front_len = le32_to_cpu(hdr->front_len); + int middle_len = le32_to_cpu(hdr->middle_len); + struct ceph_msg *msg = NULL; + int ret; + + if (con->ops->alloc_msg) { + msg = con->ops->alloc_msg(con, hdr, skip); + if (IS_ERR(msg)) + return msg; + + if (*skip) + return NULL; + } + if (!msg) { + *skip = 0; + msg = ceph_msg_new(type, front_len, 0, 0, NULL); + if (!msg) { + pr_err("unable to allocate msg type %d len %d\n", + type, front_len); + return ERR_PTR(-ENOMEM); + } + } + + if (middle_len) { + ret = ceph_alloc_middle(con, msg); + + if (ret < 0) { + ceph_msg_put(msg); + return msg; + } + } + return msg; +} + /* * Free a generically kmalloc'd message. diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index a7b684145092..b6bec59056d7 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -44,9 +44,8 @@ struct ceph_connection_operations { void (*peer_reset) (struct ceph_connection *con); struct ceph_msg * (*alloc_msg) (struct ceph_connection *con, - struct ceph_msg_header *hdr); - int (*alloc_middle) (struct ceph_connection *con, - struct ceph_msg *msg); + struct ceph_msg_header *hdr, + int *skip); /* an incoming message has a data payload; tell me what pages I * should read the data into. 
*/ int (*prepare_pages) (struct ceph_connection *con, struct ceph_msg *m, @@ -242,10 +241,6 @@ extern struct ceph_msg *ceph_msg_new(int type, int front_len, struct page **pages); extern void ceph_msg_kfree(struct ceph_msg *m); -extern struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, - struct ceph_msg_header *hdr); -extern int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg); - static inline struct ceph_msg *ceph_msg_get(struct ceph_msg *msg) { diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index 223e8bc207e3..6c00b37cc554 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -692,21 +692,33 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) * Allocate memory for incoming message */ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, - struct ceph_msg_header *hdr) + struct ceph_msg_header *hdr, + int *skip) { struct ceph_mon_client *monc = con->private; int type = le16_to_cpu(hdr->type); - int front = le32_to_cpu(hdr->front_len); + int front_len = le32_to_cpu(hdr->front_len); + struct ceph_msg *m; + *skip = 0; switch (type) { case CEPH_MSG_MON_SUBSCRIBE_ACK: - return ceph_msgpool_get(&monc->msgpool_subscribe_ack, front); + m = ceph_msgpool_get(&monc->msgpool_subscribe_ack, front_len); + break; case CEPH_MSG_STATFS_REPLY: - return ceph_msgpool_get(&monc->msgpool_statfs_reply, front); + m = ceph_msgpool_get(&monc->msgpool_statfs_reply, front_len); + break; case CEPH_MSG_AUTH_REPLY: - return ceph_msgpool_get(&monc->msgpool_auth_reply, front); + m = ceph_msgpool_get(&monc->msgpool_auth_reply, front_len); + break; + default: + return NULL; } - return ceph_alloc_msg(con, hdr); + + if (!m) + *skip = 1; + + return m; } /* @@ -749,5 +761,4 @@ const static struct ceph_connection_operations mon_con_ops = { .dispatch = dispatch, .fault = mon_fault, .alloc_msg = mon_alloc_msg, - .alloc_middle = ceph_alloc_middle, }; diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 8417e21a3cb2..545e93617993 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -1304,18 +1304,28 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) } static struct ceph_msg *alloc_msg(struct ceph_connection *con, - struct ceph_msg_header *hdr) + struct ceph_msg_header *hdr, + int *skip) { struct ceph_osd *osd = con->private; struct ceph_osd_client *osdc = osd->o_osdc; int type = le16_to_cpu(hdr->type); int front = le32_to_cpu(hdr->front_len); + struct ceph_msg *m; + *skip = 0; switch (type) { case CEPH_MSG_OSD_OPREPLY: - return ceph_msgpool_get(&osdc->msgpool_op_reply, front); + m = ceph_msgpool_get(&osdc->msgpool_op_reply, front); + break; + default: + return NULL; } - return ceph_alloc_msg(con, hdr); + + if (!m) + *skip = 1; + + return m; } /* @@ -1390,6 +1400,5 @@ const static struct ceph_connection_operations osd_con_ops = { .verify_authorizer_reply = verify_authorizer_reply, .alloc_msg = alloc_msg, .fault = osd_reset, - .alloc_middle = ceph_alloc_middle, .prepare_pages = prepare_pages, }; -- cgit v1.2.3 From 9d7f0f139edfdce1a1539b100c617fd9182b0829 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Mon, 11 Jan 2010 10:32:02 -0800 Subject: ceph: refactor messages data section allocation Signed-off-by: Yehuda Sadeh --- fs/ceph/messenger.c | 67 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 39 insertions(+), 28 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 25de15c006b1..e8742cc9ecdf 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -1315,7 
+1315,7 @@ static int read_partial_message(struct ceph_connection *con) struct ceph_msg *m = con->in_msg; void *p; int ret; - int to, want, left; + int to, left; unsigned front_len, middle_len, data_len, data_off; int datacrc = con->msgr->nocrc; int skip; @@ -1351,6 +1351,7 @@ static int read_partial_message(struct ceph_connection *con) data_len = le32_to_cpu(con->in_hdr.data_len); if (data_len > CEPH_MSG_MAX_DATA_LEN) return -EIO; + data_off = le16_to_cpu(con->in_hdr.data_off); /* allocate message? */ if (!con->in_msg) { @@ -1375,7 +1376,10 @@ static int read_partial_message(struct ceph_connection *con) m->front.iov_len = 0; /* haven't read it yet */ if (m->middle) m->middle->vec.iov_len = 0; - memcpy(&m->hdr, &con->in_hdr, sizeof(con->in_hdr)); + + con->in_msg_pos.page = 0; + con->in_msg_pos.page_pos = data_off & ~PAGE_MASK; + con->in_msg_pos.data_pos = 0; } /* front */ @@ -1393,31 +1397,6 @@ static int read_partial_message(struct ceph_connection *con) } /* (page) data */ - data_off = le16_to_cpu(m->hdr.data_off); - if (data_len == 0) - goto no_data; - - if (m->nr_pages == 0) { - con->in_msg_pos.page = 0; - con->in_msg_pos.page_pos = data_off & ~PAGE_MASK; - con->in_msg_pos.data_pos = 0; - /* find pages for data payload */ - want = calc_pages_for(data_off & ~PAGE_MASK, data_len); - ret = -1; - mutex_unlock(&con->mutex); - if (con->ops->prepare_pages) - ret = con->ops->prepare_pages(con, m, want); - mutex_lock(&con->mutex); - if (ret < 0) { - dout("%p prepare_pages failed, skipping payload\n", m); - con->in_base_pos = -data_len - sizeof(m->footer); - ceph_msg_put(con->in_msg); - con->in_msg = NULL; - con->in_tag = CEPH_MSGR_TAG_READY; - return 0; - } - BUG_ON(m->nr_pages < want); - } while (con->in_msg_pos.data_pos < data_len) { left = min((int)(data_len - con->in_msg_pos.data_pos), (int)(PAGE_SIZE - con->in_msg_pos.page_pos)); @@ -1440,7 +1419,6 @@ static int read_partial_message(struct ceph_connection *con) } } -no_data: /* footer */ to = sizeof(m->hdr) + sizeof(m->footer); while (con->in_base_pos < to) { @@ -2136,6 +2114,25 @@ static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) return 0; } +static int ceph_alloc_data_section(struct ceph_connection *con, struct ceph_msg *msg) +{ + int ret; + int want; + int data_len = le32_to_cpu(msg->hdr.data_len); + unsigned data_off = le16_to_cpu(msg->hdr.data_off); + + want = calc_pages_for(data_off & ~PAGE_MASK, data_len); + ret = -1; + mutex_unlock(&con->mutex); + if (con->ops->prepare_pages) + ret = con->ops->prepare_pages(con, msg, want); + mutex_lock(&con->mutex); + + BUG_ON(msg->nr_pages < want); + + return ret; +} + /* * Generic message allocator, for incoming messages. 
*/ @@ -2146,6 +2143,7 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, int type = le16_to_cpu(hdr->type); int front_len = le32_to_cpu(hdr->front_len); int middle_len = le32_to_cpu(hdr->middle_len); + int data_len = le32_to_cpu(hdr->data_len); struct ceph_msg *msg = NULL; int ret; @@ -2166,6 +2164,7 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, return ERR_PTR(-ENOMEM); } } + memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); if (middle_len) { ret = ceph_alloc_middle(con, msg); @@ -2175,6 +2174,18 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, return msg; } } + + if (data_len) { + ret = ceph_alloc_data_section(con, msg); + + if (ret < 0) { + *skip = 1; + ceph_msg_put(msg); + return NULL; + } + } + + return msg; } -- cgit v1.2.3 From 0547a9b30a5ac8680325752b61d3ffa9d4971b6e Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Mon, 11 Jan 2010 14:47:13 -0800 Subject: ceph: alloc message data pages and check if tid exists Now doing it in the same callback that is also responsible for allocating the 'front' part of the message. If we get a message that we haven't got a corresponding tid for, mark it for skipping. Moving the mutex unlock/lock from the osd alloc_msg callback to the calling function in the messenger. Signed-off-by: Yehuda Sadeh --- fs/ceph/messenger.c | 33 ++------------------------ fs/ceph/messenger.h | 4 ---- fs/ceph/mon_client.c | 1 + fs/ceph/osd_client.c | 66 +++++++++++++++++++++++++++++++++------------------- 4 files changed, 45 insertions(+), 59 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index e8742cc9ecdf..f708803e6857 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -2114,25 +2114,6 @@ static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) return 0; } -static int ceph_alloc_data_section(struct ceph_connection *con, struct ceph_msg *msg) -{ - int ret; - int want; - int data_len = le32_to_cpu(msg->hdr.data_len); - unsigned data_off = le16_to_cpu(msg->hdr.data_off); - - want = calc_pages_for(data_off & ~PAGE_MASK, data_len); - ret = -1; - mutex_unlock(&con->mutex); - if (con->ops->prepare_pages) - ret = con->ops->prepare_pages(con, msg, want); - mutex_lock(&con->mutex); - - BUG_ON(msg->nr_pages < want); - - return ret; -} - /* * Generic message allocator, for incoming messages. */ @@ -2143,12 +2124,13 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, int type = le16_to_cpu(hdr->type); int front_len = le32_to_cpu(hdr->front_len); int middle_len = le32_to_cpu(hdr->middle_len); - int data_len = le32_to_cpu(hdr->data_len); struct ceph_msg *msg = NULL; int ret; if (con->ops->alloc_msg) { + mutex_unlock(&con->mutex); msg = con->ops->alloc_msg(con, hdr, skip); + mutex_lock(&con->mutex); if (IS_ERR(msg)) return msg; @@ -2175,17 +2157,6 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, } } - if (data_len) { - ret = ceph_alloc_data_section(con, msg); - - if (ret < 0) { - *skip = 1; - ceph_msg_put(msg); - return NULL; - } - } - - return msg; } diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index b6bec59056d7..dca2d32b40de 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -46,10 +46,6 @@ struct ceph_connection_operations { struct ceph_msg * (*alloc_msg) (struct ceph_connection *con, struct ceph_msg_header *hdr, int *skip); - /* an incoming message has a data payload; tell me what pages I - * should read the data into. 
*/ - int (*prepare_pages) (struct ceph_connection *con, struct ceph_msg *m, - int want); }; extern const char *ceph_name_type_str(int t); diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index 6c00b37cc554..3f7ae7f73c50 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -701,6 +701,7 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, struct ceph_msg *m; *skip = 0; + switch (type) { case CEPH_MSG_MON_SUBSCRIBE_ACK: m = ceph_msgpool_get(&monc->msgpool_subscribe_ack, front_len); diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 545e93617993..44abe299c69f 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -998,31 +998,26 @@ bad: * find those pages. * 0 = success, -1 failure. */ -static int prepare_pages(struct ceph_connection *con, struct ceph_msg *m, - int want) +static int prepare_pages(struct ceph_connection *con, + struct ceph_msg_header *hdr, + struct ceph_osd_request *req, + u64 tid, + struct ceph_msg *m) { struct ceph_osd *osd = con->private; struct ceph_osd_client *osdc; - struct ceph_osd_request *req; - u64 tid; int ret = -1; - int type = le16_to_cpu(m->hdr.type); + int data_len = le32_to_cpu(hdr->data_len); + unsigned data_off = le16_to_cpu(hdr->data_off); + + int want = calc_pages_for(data_off & ~PAGE_MASK, data_len); if (!osd) return -1; + osdc = osd->o_osdc; dout("prepare_pages on msg %p want %d\n", m, want); - if (unlikely(type != CEPH_MSG_OSD_OPREPLY)) - return -1; /* hmm! */ - - tid = le64_to_cpu(m->hdr.tid); - mutex_lock(&osdc->request_mutex); - req = __lookup_request(osdc, tid); - if (!req) { - dout("prepare_pages unknown tid %llu\n", tid); - goto out; - } dout("prepare_pages tid %llu has %d pages, want %d\n", tid, req->r_num_pages, want); if (unlikely(req->r_num_pages < want)) @@ -1040,7 +1035,8 @@ static int prepare_pages(struct ceph_connection *con, struct ceph_msg *m, m->nr_pages = req->r_num_pages; ret = 0; /* success */ out: - mutex_unlock(&osdc->request_mutex); + BUG_ON(ret < 0 || m->nr_pages < want); + return ret; } @@ -1311,19 +1307,42 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con, struct ceph_osd_client *osdc = osd->o_osdc; int type = le16_to_cpu(hdr->type); int front = le32_to_cpu(hdr->front_len); + int data_len = le32_to_cpu(hdr->data_len); struct ceph_msg *m; + struct ceph_osd_request *req; + u64 tid; + int err; *skip = 0; - switch (type) { - case CEPH_MSG_OSD_OPREPLY: - m = ceph_msgpool_get(&osdc->msgpool_op_reply, front); - break; - default: + if (type != CEPH_MSG_OSD_OPREPLY) return NULL; - } - if (!m) + tid = le64_to_cpu(hdr->tid); + mutex_lock(&osdc->request_mutex); + req = __lookup_request(osdc, tid); + if (!req) { + *skip = 1; + m = NULL; + dout("prepare_pages unknown tid %llu\n", tid); + goto out; + } + m = ceph_msgpool_get(&osdc->msgpool_op_reply, front); + if (!m) { *skip = 1; + goto out; + } + + if (data_len > 0) { + err = prepare_pages(con, hdr, req, tid, m); + if (err < 0) { + *skip = 1; + ceph_msg_put(m); + m = ERR_PTR(err); + } + } + +out: + mutex_unlock(&osdc->request_mutex); return m; } @@ -1400,5 +1419,4 @@ const static struct ceph_connection_operations osd_con_ops = { .verify_authorizer_reply = verify_authorizer_reply, .alloc_msg = alloc_msg, .fault = osd_reset, - .prepare_pages = prepare_pages, }; -- cgit v1.2.3 From 0d59ab81c3d3adf466c3fd37d7fb6d46b05d1fd4 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Wed, 13 Jan 2010 17:03:23 -0800 Subject: ceph: keep reserved replies on the request structure This includes treating all the data preallocation and 
revokation at the same place, not having to have a special case for the reserved pages. Signed-off-by: Yehuda Sadeh --- fs/ceph/messenger.c | 20 ++++----- fs/ceph/messenger.h | 4 +- fs/ceph/osd_client.c | 118 ++++++++++++++++++++++++++++++++++++--------------- fs/ceph/osd_client.h | 8 ++-- 4 files changed, 100 insertions(+), 50 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index f708803e6857..81bc779adb90 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -1985,30 +1985,30 @@ void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg) } /* - * Revoke a page vector that we may be reading data into + * Revoke a message that we may be reading data into */ -void ceph_con_revoke_pages(struct ceph_connection *con, struct page **pages) +void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg) { mutex_lock(&con->mutex); - if (con->in_msg && con->in_msg->pages == pages) { + if (con->in_msg && con->in_msg == msg) { + unsigned front_len = le32_to_cpu(con->in_hdr.front_len); + unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len); unsigned data_len = le32_to_cpu(con->in_hdr.data_len); /* skip rest of message */ - dout("con_revoke_pages %p msg %p pages %p revoked\n", con, - con->in_msg, pages); - if (con->in_msg_pos.data_pos < data_len) - con->in_base_pos = con->in_msg_pos.data_pos - data_len; - else + dout("con_revoke_pages %p msg %p revoked\n", con, msg); con->in_base_pos = con->in_base_pos - sizeof(struct ceph_msg_header) - + front_len - + middle_len - + data_len - sizeof(struct ceph_msg_footer); - con->in_msg->pages = NULL; ceph_msg_put(con->in_msg); con->in_msg = NULL; con->in_tag = CEPH_MSGR_TAG_READY; } else { dout("con_revoke_pages %p msg %p pages %p no-op\n", - con, con->in_msg, pages); + con, con->in_msg, msg); } mutex_unlock(&con->mutex); } diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index dca2d32b40de..c26a3d8aa78c 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -226,8 +226,8 @@ extern void ceph_con_open(struct ceph_connection *con, extern void ceph_con_close(struct ceph_connection *con); extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg); extern void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg); -extern void ceph_con_revoke_pages(struct ceph_connection *con, - struct page **pages); +extern void ceph_con_revoke_message(struct ceph_connection *con, + struct ceph_msg *msg); extern void ceph_con_keepalive(struct ceph_connection *con); extern struct ceph_connection *ceph_con_get(struct ceph_connection *con); extern void ceph_con_put(struct ceph_connection *con); diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 44abe299c69f..df2106839713 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -13,6 +13,8 @@ #include "decode.h" #include "auth.h" +#define OSD_REPLY_RESERVE_FRONT_LEN 512 + const static struct ceph_connection_operations osd_con_ops; static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd); @@ -73,6 +75,16 @@ static void calc_layout(struct ceph_osd_client *osdc, req->r_oid, req->r_oid_len, objoff, objlen, req->r_num_pages); } +static void remove_replies(struct ceph_osd_request *req) +{ + int i; + int max = ARRAY_SIZE(req->replies); + + for (i=0; ireplies[i]) + ceph_msg_put(req->replies[i]); + } +} /* * requests @@ -87,12 +99,13 @@ void ceph_osdc_release_request(struct kref *kref) ceph_msg_put(req->r_request); if (req->r_reply) ceph_msg_put(req->r_reply); - if (req->r_con_filling_pages) { + 
remove_replies(req); + if (req->r_con_filling_msg) { dout("release_request revoking pages %p from con %p\n", - req->r_pages, req->r_con_filling_pages); - ceph_con_revoke_pages(req->r_con_filling_pages, - req->r_pages); - ceph_con_put(req->r_con_filling_pages); + req->r_pages, req->r_con_filling_msg); + ceph_con_revoke_message(req->r_con_filling_msg, + req->r_reply); + ceph_con_put(req->r_con_filling_msg); } if (req->r_own_pages) ceph_release_page_vector(req->r_pages, @@ -104,6 +117,60 @@ void ceph_osdc_release_request(struct kref *kref) kfree(req); } +static int alloc_replies(struct ceph_osd_request *req, int num_reply) +{ + int i; + int max = ARRAY_SIZE(req->replies); + + BUG_ON(num_reply > max); + + for (i=0; ireplies[i] = ceph_msg_new(0, OSD_REPLY_RESERVE_FRONT_LEN, 0, 0, NULL); + if (IS_ERR(req->replies[i])) { + int j; + int err = PTR_ERR(req->replies[i]); + for (j = 0; j<=i; j++) { + ceph_msg_put(req->replies[j]); + } + return err; + } + } + + for (; ireplies[i] = NULL; + } + + req->cur_reply = 0; + + return 0; +} + +static struct ceph_msg *__get_next_reply(struct ceph_connection *con, + struct ceph_osd_request *req, + int front_len) +{ + struct ceph_msg *reply; + if (req->r_con_filling_msg) { + dout("revoking reply msg %p from old con %p\n", req->r_reply, + req->r_con_filling_msg); + ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply); + ceph_con_put(req->r_con_filling_msg); + req->cur_reply = 0; + } + reply = req->replies[req->cur_reply]; + if (!reply || front_len > OSD_REPLY_RESERVE_FRONT_LEN) { + /* maybe we can allocate it now? */ + reply = ceph_msg_new(0, front_len, 0, 0, NULL); + if (!reply || IS_ERR(reply)) { + pr_err(" reply alloc failed, front_len=%d\n", front_len); + return ERR_PTR(-ENOMEM); + } + } + req->r_con_filling_msg = ceph_con_get(con); + req->r_reply = ceph_msg_get(reply); /* for duration of read over socket */ + return ceph_msg_get(reply); +} + /* * build new request AND message, calculate layout, and adjust file * extent as needed. @@ -147,7 +214,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, if (req == NULL) return ERR_PTR(-ENOMEM); - err = ceph_msgpool_resv(&osdc->msgpool_op_reply, num_reply); + err = alloc_replies(req, num_reply); if (err) { ceph_osdc_put_request(req); return ERR_PTR(-ENOMEM); @@ -173,7 +240,6 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, else msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, 0, 0, NULL); if (IS_ERR(msg)) { - ceph_msgpool_resv(&osdc->msgpool_op_reply, -num_reply); ceph_osdc_put_request(req); return ERR_PTR(PTR_ERR(msg)); } @@ -471,8 +537,6 @@ static void __unregister_request(struct ceph_osd_client *osdc, rb_erase(&req->r_node, &osdc->requests); osdc->num_requests--; - ceph_msgpool_resv(&osdc->msgpool_op_reply, -req->r_num_prealloc_reply); - if (req->r_osd) { /* make sure the original request isn't in flight. */ ceph_con_revoke(&req->r_osd->o_con, req->r_request); @@ -724,12 +788,12 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, flags = le32_to_cpu(rhead->flags); /* - * if this connection filled our pages, drop our reference now, to + * if this connection filled our message, drop our reference now, to * avoid a (safe but slower) revoke later. 
*/ - if (req->r_con_filling_pages == con && req->r_pages == msg->pages) { - dout(" got pages, dropping con_filling_pages ref %p\n", con); - req->r_con_filling_pages = NULL; + if (req->r_con_filling_msg == con && req->r_reply == msg) { + dout(" got pages, dropping con_filling_msg ref %p\n", con); + req->r_con_filling_msg = NULL; ceph_con_put(con); } @@ -998,7 +1062,7 @@ bad: * find those pages. * 0 = success, -1 failure. */ -static int prepare_pages(struct ceph_connection *con, +static int __prepare_pages(struct ceph_connection *con, struct ceph_msg_header *hdr, struct ceph_osd_request *req, u64 tid, @@ -1017,20 +1081,10 @@ static int prepare_pages(struct ceph_connection *con, osdc = osd->o_osdc; - dout("prepare_pages on msg %p want %d\n", m, want); - dout("prepare_pages tid %llu has %d pages, want %d\n", + dout("__prepare_pages on msg %p tid %llu, has %d pages, want %d\n", m, tid, req->r_num_pages, want); if (unlikely(req->r_num_pages < want)) goto out; - - if (req->r_con_filling_pages) { - dout("revoking pages %p from old con %p\n", req->r_pages, - req->r_con_filling_pages); - ceph_con_revoke_pages(req->r_con_filling_pages, req->r_pages); - ceph_con_put(req->r_con_filling_pages); - } - req->r_con_filling_pages = ceph_con_get(con); - req->r_reply = ceph_msg_get(m); /* for duration of read over socket */ m->pages = req->r_pages; m->nr_pages = req->r_num_pages; ret = 0; /* success */ @@ -1164,13 +1218,8 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) err = ceph_msgpool_init(&osdc->msgpool_op, 4096, 10, true); if (err < 0) goto out_mempool; - err = ceph_msgpool_init(&osdc->msgpool_op_reply, 512, 0, false); - if (err < 0) - goto out_msgpool; return 0; -out_msgpool: - ceph_msgpool_destroy(&osdc->msgpool_op); out_mempool: mempool_destroy(osdc->req_mempool); out: @@ -1186,7 +1235,6 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc) } mempool_destroy(osdc->req_mempool); ceph_msgpool_destroy(&osdc->msgpool_op); - ceph_msgpool_destroy(&osdc->msgpool_op_reply); } /* @@ -1323,17 +1371,17 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con, if (!req) { *skip = 1; m = NULL; - dout("prepare_pages unknown tid %llu\n", tid); + dout("alloc_msg unknown tid %llu\n", tid); goto out; } - m = ceph_msgpool_get(&osdc->msgpool_op_reply, front); - if (!m) { + m = __get_next_reply(con, req, front); + if (!m || IS_ERR(m)) { *skip = 1; goto out; } if (data_len > 0) { - err = prepare_pages(con, hdr, req, tid, m); + err = __prepare_pages(con, hdr, req, tid, m); if (err < 0) { *skip = 1; ceph_msg_put(m); diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h index 4162c6810a8f..8d533d9406ff 100644 --- a/fs/ceph/osd_client.h +++ b/fs/ceph/osd_client.h @@ -44,7 +44,7 @@ struct ceph_osd_request { struct ceph_osd *r_osd; struct ceph_pg r_pgid; - struct ceph_connection *r_con_filling_pages; + struct ceph_connection *r_con_filling_msg; struct ceph_msg *r_request, *r_reply; int r_result; @@ -75,6 +75,9 @@ struct ceph_osd_request { struct page **r_pages; /* pages for data payload */ int r_pages_from_pool; int r_own_pages; /* if true, i own page list */ + + struct ceph_msg *replies[2]; + int cur_reply; }; struct ceph_osd_client { @@ -98,8 +101,7 @@ struct ceph_osd_client { mempool_t *req_mempool; - struct ceph_msgpool msgpool_op; - struct ceph_msgpool msgpool_op_reply; + struct ceph_msgpool msgpool_op; }; extern int ceph_osdc_init(struct ceph_osd_client *osdc, -- cgit v1.2.3 From 361be8601d78e488b5249032cc4e779b81d7928e Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 25 Jan 2010 
16:03:02 -0800 Subject: ceph: precede encoded ceph_pg_pool struct with version Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.h | 2 +- fs/ceph/osdmap.c | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index d8923fed8c2e..f3bfc3c4f6e6 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -38,7 +38,7 @@ #define CEPH_OSD_PROTOCOL 8 /* cluster internal */ #define CEPH_MDS_PROTOCOL 9 /* cluster internal */ #define CEPH_MON_PROTOCOL 5 /* cluster internal */ -#define CEPH_OSDC_PROTOCOL 22 /* server/client */ +#define CEPH_OSDC_PROTOCOL 23 /* server/client */ #define CEPH_MDSC_PROTOCOL 32 /* server/client */ #define CEPH_MONC_PROTOCOL 15 /* server/client */ diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 0dbd606e21c4..a143c51c2cfb 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -414,6 +414,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) struct ceph_osdmap *map; u16 version; u32 len, max, i; + u8 ev; int err = -EINVAL; void *start = *p; @@ -441,10 +442,11 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) } ceph_decode_32_safe(p, end, max, bad); while (max--) { - ceph_decode_need(p, end, 4+sizeof(map->pg_pool->v), bad); + ceph_decode_need(p, end, 4+1+sizeof(map->pg_pool->v), bad); i = ceph_decode_32(p); if (i >= map->num_pools) goto bad; + ev = ceph_decode_8(p); /* encoding version */ ceph_decode_copy(p, &map->pg_pool[i].v, sizeof(map->pg_pool->v)); calc_pg_masks(&map->pg_pool[i]); @@ -603,6 +605,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, /* new_pool */ ceph_decode_32_safe(p, end, len, bad); while (len--) { + __u8 ev; + ceph_decode_32_safe(p, end, pool, bad); if (pool >= map->num_pools) { void *pg_pool = kcalloc(pool + 1, @@ -618,6 +622,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, map->pg_pool = pg_pool; map->num_pools = pool+1; } + ceph_decode_need(p, end, 1 + sizeof(map->pg_pool->v), bad); + ev = ceph_decode_8(p); /* encoding version */ ceph_decode_copy(p, &map->pg_pool[pool].v, sizeof(map->pg_pool->v)); calc_pg_masks(&map->pg_pool[pool]); -- cgit v1.2.3 From ac8839d7b264d0fa478fca7c4f9b6bb833540a80 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 27 Jan 2010 14:28:10 -0800 Subject: ceph: include type in ceph_entity_addr, filepath Include a type/version in ceph_entity_addr and filepath. Include extra byte in filepath encoding as necessary. Signed-off-by: Sage Weil --- fs/ceph/decode.h | 1 + fs/ceph/mds_client.c | 2 +- fs/ceph/messenger.c | 1 + fs/ceph/msgr.h | 5 +++-- 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/fs/ceph/decode.h b/fs/ceph/decode.h index 10de84896244..b90a33b948c7 100644 --- a/fs/ceph/decode.h +++ b/fs/ceph/decode.h @@ -138,6 +138,7 @@ static inline void ceph_encode_filepath(void **p, void *end, { u32 len = path ? 
strlen(path) : 0; BUG_ON(*p + sizeof(ino) + sizeof(len) + len > end); + ceph_encode_8(p, 1); ceph_encode_64(p, ino); ceph_encode_32(p, len); if (len) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 93998a0678c4..4e3e8b229e67 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1325,7 +1325,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, } len = sizeof(*head) + - pathlen1 + pathlen2 + 2*(sizeof(u32) + sizeof(u64)); + pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)); /* calculate (max) length for cap releases */ len += sizeof(struct ceph_mds_request_release) * diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 81bc779adb90..e4e8d4439d3a 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -1909,6 +1909,7 @@ struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr) msgr->inst.addr = *myaddr; /* select a random nonce */ + msgr->inst.addr.type = 0; get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); encode_my_addr(msgr); diff --git a/fs/ceph/msgr.h b/fs/ceph/msgr.h index 40b6189aa9e3..8aaab414f3f8 100644 --- a/fs/ceph/msgr.h +++ b/fs/ceph/msgr.h @@ -21,7 +21,7 @@ * whenever the wire protocol changes. try to keep this string length * constant. */ -#define CEPH_BANNER "ceph v026" +#define CEPH_BANNER "ceph v027" #define CEPH_BANNER_MAX_LEN 30 @@ -61,7 +61,8 @@ extern const char *ceph_entity_type_name(int type); * entity_addr -- network address */ struct ceph_entity_addr { - __le64 nonce; /* unique id for process (e.g. pid) */ + __le32 type; + __le32 nonce; /* unique id for process (e.g. pid) */ struct sockaddr_storage in_addr; } __attribute__ ((packed)); -- cgit v1.2.3 From 0f26c4b21b684825a6dd41f2bc04d48ff62d72f8 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Fri, 29 Jan 2010 11:01:11 -0800 Subject: ceph: remove unreachable code We never truncate to a smaller size without contacting the MDS. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/inode.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 71e107fb4dbc..a4f573ab232e 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1396,7 +1396,6 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) int release = 0, dirtied = 0; int mask = 0; int err = 0; - int queue_trunc = 0; if (ceph_snap(inode) != CEPH_NOSNAP) return -EROFS; @@ -1510,11 +1509,6 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size > inode->i_size) { inode->i_size = attr->ia_size; - if (attr->ia_size < inode->i_size) { - ci->i_truncate_size = attr->ia_size; - ci->i_truncate_pending++; - queue_trunc = 1; - } inode->i_blocks = (attr->ia_size + (1 << 9) - 1) >> 9; inode->i_ctime = attr->ia_ctime; @@ -1567,9 +1561,6 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) release &= issued; spin_unlock(&inode->i_lock); - if (queue_trunc) - __ceph_do_pending_vmtruncate(inode); - if (mask) { req->r_inode = igrab(inode); req->r_inode_drop = release; -- cgit v1.2.3 From 0c948992a00d478c17042f4790b7d6b35299cf94 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Mon, 1 Feb 2010 16:10:45 -0800 Subject: ceph: always send truncation info with read and write osd ops This fixes a bug where the read/write ops arrive the osd after a following truncation request. 
Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.h | 2 +- fs/ceph/osd_client.c | 16 +++------------- fs/ceph/rados.h | 6 ++---- 3 files changed, 6 insertions(+), 18 deletions(-) diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index f3bfc3c4f6e6..004aae59d4ba 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -38,7 +38,7 @@ #define CEPH_OSD_PROTOCOL 8 /* cluster internal */ #define CEPH_MDS_PROTOCOL 9 /* cluster internal */ #define CEPH_MON_PROTOCOL 5 /* cluster internal */ -#define CEPH_OSDC_PROTOCOL 23 /* server/client */ +#define CEPH_OSDC_PROTOCOL 24 /* server/client */ #define CEPH_MDSC_PROTOCOL 32 /* server/client */ #define CEPH_MONC_PROTOCOL 15 /* server/client */ diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index df2106839713..944759b3079f 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -199,11 +199,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, struct ceph_osd_request_head *head; struct ceph_osd_op *op; void *p; - int do_trunc = truncate_seq && (off + *plen > truncate_size); - int num_op = 1 + do_sync + do_trunc; + int num_op = 1 + do_sync; size_t msg_size = sizeof(*head) + num_op*sizeof(*op); int err, i; - u64 prevofs; if (use_mempool) { req = mempool_alloc(osdc->req_mempool, GFP_NOFS); @@ -268,22 +266,14 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, req->r_request->hdr.data_len = cpu_to_le32(*plen); op->payload_len = cpu_to_le32(*plen); } + op->extent.truncate_size = cpu_to_le64(truncate_size); + op->extent.truncate_seq = cpu_to_le32(truncate_seq); /* fill in oid */ head->object_len = cpu_to_le32(req->r_oid_len); memcpy(p, req->r_oid, req->r_oid_len); p += req->r_oid_len; - /* additional ops */ - if (do_trunc) { - op++; - op->op = cpu_to_le16(opcode == CEPH_OSD_OP_READ ? - CEPH_OSD_OP_MASKTRUNC : CEPH_OSD_OP_SETTRUNC); - op->trunc.truncate_seq = cpu_to_le32(truncate_seq); - prevofs = le64_to_cpu((op-1)->extent.offset); - op->trunc.truncate_size = cpu_to_le64(truncate_size - - (off-prevofs)); - } if (do_sync) { op++; op->op = cpu_to_le16(CEPH_OSD_OP_STARTSYNC); diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h index c5614d4ae34a..123fd845459e 100644 --- a/fs/ceph/rados.h +++ b/fs/ceph/rados.h @@ -304,15 +304,13 @@ struct ceph_osd_op { union { struct { __le64 offset, length; + __le64 truncate_size; + __le32 truncate_seq; } __attribute__ ((packed)) extent; struct { __le32 name_len; __le32 value_len; } __attribute__ ((packed)) xattr; - struct { - __le64 truncate_size; - __le32 truncate_seq; - } __attribute__ ((packed)) trunc; struct { __u8 class_len; __u8 method_len; -- cgit v1.2.3 From 79788c698b290426320e60374ed1324e4b5c69eb Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 2 Feb 2010 16:34:04 -0800 Subject: ceph: release all pages after successful osd write response We release all the pages, even if the osd response was different than the number of pages written. This could only happen due to truncation that arrives the osd in different order, for which we want the pages released anyway. 
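To make the accounting concrete (numbers illustrative only): the old code derived the completed page count from the byte length in the OSD's reply, roughly

        wrote = (bytes + (offset & ~PAGE_CACHE_MASK) + ~PAGE_CACHE_MASK)
                                                >> PAGE_CACHE_SHIFT;

so if a request covered 8 pages (32 KB) but a racing truncation caused the OSD to report only 20 KB written, wrote came out as 5 and the last three pages were never unlocked or released (and the WARN_ON fired). Trusting req->r_num_pages instead releases everything the client actually sent.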
Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/addr.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index a3bd9deb555c..8065dc92c611 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -524,9 +524,13 @@ static void writepages_finish(struct ceph_osd_request *req, bytes = le64_to_cpu(op->extent.length); if (rc >= 0) { - wrote = (bytes + (offset & ~PAGE_CACHE_MASK) + ~PAGE_CACHE_MASK) - >> PAGE_CACHE_SHIFT; - WARN_ON(wrote != req->r_num_pages); + /* + * Assume we wrote the pages we originally sent. The + * osd might reply with fewer pages if our writeback + * raced with a truncation and was adjusted at the osd, + * so don't believe the reply. + */ + wrote = req->r_num_pages; } else { wrote = 0; mapping_set_error(mapping, rc); -- cgit v1.2.3 From c7e337d6490d6f2f5e66ddf1b04d00b0dbd10108 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 2 Feb 2010 16:11:19 -0800 Subject: ceph: buffer decoding helpers Helper for decoding into a ceph_buffer, and other misc decoding helpers we will need. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/buffer.c | 17 +++++++++++++++++ fs/ceph/buffer.h | 2 ++ fs/ceph/decode.h | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+) diff --git a/fs/ceph/buffer.c b/fs/ceph/buffer.c index 2576bd452cb8..b98086c7aeba 100644 --- a/fs/ceph/buffer.c +++ b/fs/ceph/buffer.c @@ -1,6 +1,7 @@ #include "ceph_debug.h" #include "buffer.h" +#include "decode.h" struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp) { @@ -59,3 +60,19 @@ int ceph_buffer_alloc(struct ceph_buffer *b, int len, gfp_t gfp) return 0; } +int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end) +{ + size_t len; + + ceph_decode_need(p, end, sizeof(u32), bad); + len = ceph_decode_32(p); + dout("decode_buffer len %d\n", (int)len); + ceph_decode_need(p, end, len, bad); + *b = ceph_buffer_new(len, GFP_NOFS); + if (!*b) + return -ENOMEM; + ceph_decode_copy(p, (*b)->vec.iov_base, len); + return 0; +bad: + return -EINVAL; +} diff --git a/fs/ceph/buffer.h b/fs/ceph/buffer.h index 47b9514c5bbd..58d19014068f 100644 --- a/fs/ceph/buffer.h +++ b/fs/ceph/buffer.h @@ -34,4 +34,6 @@ static inline void ceph_buffer_put(struct ceph_buffer *b) kref_put(&b->kref, ceph_buffer_release); } +extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end); + #endif diff --git a/fs/ceph/decode.h b/fs/ceph/decode.h index b90a33b948c7..65b3e022eaf5 100644 --- a/fs/ceph/decode.h +++ b/fs/ceph/decode.h @@ -2,6 +2,7 @@ #define __CEPH_DECODE_H #include +#include #include "types.h" @@ -65,6 +66,11 @@ static inline void ceph_decode_copy(void **p, void *pv, size_t n) ceph_decode_need(p, end, sizeof(u16), bad); \ v = ceph_decode_16(p); \ } while (0) +#define ceph_decode_8_safe(p, end, v, bad) \ + do { \ + ceph_decode_need(p, end, sizeof(u8), bad); \ + v = ceph_decode_8(p); \ + } while (0) #define ceph_decode_copy_safe(p, end, pv, n, bad) \ do { \ @@ -156,5 +162,33 @@ static inline void ceph_encode_string(void **p, void *end, *p += len; } +#define ceph_encode_need(p, end, n, bad) \ + do { \ + if (unlikely(*(p) + (n) > (end))) \ + goto bad; \ + } while (0) + +#define ceph_encode_64_safe(p, end, v, bad) \ + do { \ + ceph_encode_need(p, end, sizeof(u64), bad); \ + ceph_encode_64(p, v); \ + } while (0) +#define ceph_encode_32_safe(p, end, v, bad) \ + do { \ + ceph_encode_need(p, end, sizeof(u32), bad); \ + ceph_encode_32(p, v); \ + } while (0) +#define ceph_encode_16_safe(p, end, v, bad) \ + do { \ + 
ceph_encode_need(p, end, sizeof(u16), bad); \ + ceph_encode_16(p, v); \ + } while (0) + +#define ceph_encode_copy_safe(p, end, pv, n, bad) \ + do { \ + ceph_encode_need(p, end, n, bad); \ + ceph_encode_copy(p, pv, n); \ + } while (0) + #endif -- cgit v1.2.3 From 8b6e4f2d8b21c25225b1ce8d53a2e03b92cc8522 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 2 Feb 2010 16:07:07 -0800 Subject: ceph: aes crypto and base64 encode/decode helpers Helpers to encrypt/decrypt AES and base64. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/Kconfig | 1 + fs/ceph/Makefile | 1 + fs/ceph/armor.c | 99 ++++++++++++++ fs/ceph/crypto.c | 408 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/crypto.h | 48 +++++++ 5 files changed, 557 insertions(+) create mode 100644 fs/ceph/armor.c create mode 100644 fs/ceph/crypto.c create mode 100644 fs/ceph/crypto.h diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig index bc1fbd956187..04b8280582a9 100644 --- a/fs/ceph/Kconfig +++ b/fs/ceph/Kconfig @@ -2,6 +2,7 @@ config CEPH_FS tristate "Ceph distributed file system (EXPERIMENTAL)" depends on INET && EXPERIMENTAL select LIBCRC32C + select CONFIG_CRYPTO_AES help Choose Y or M here to include support for mounting the experimental Ceph distributed file system. Ceph is an extremely diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile index 47caf2f1b75a..85a588e43e7d 100644 --- a/fs/ceph/Makefile +++ b/fs/ceph/Makefile @@ -14,6 +14,7 @@ ceph-objs := super.o inode.o dir.o file.o addr.o ioctl.o \ osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \ debugfs.o \ auth.o auth_none.o \ + crypto.o armor.o \ ceph_fs.o ceph_strings.o ceph_hash.o ceph_frag.o else diff --git a/fs/ceph/armor.c b/fs/ceph/armor.c new file mode 100644 index 000000000000..67b2c030924b --- /dev/null +++ b/fs/ceph/armor.c @@ -0,0 +1,99 @@ + +#include + +/* + * base64 encode/decode. 
+ */ + +const char *pem_key = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +static int encode_bits(int c) +{ + return pem_key[c]; +} + +static int decode_bits(char c) +{ + if (c >= 'A' && c <= 'Z') + return c - 'A'; + if (c >= 'a' && c <= 'z') + return c - 'a' + 26; + if (c >= '0' && c <= '9') + return c - '0' + 52; + if (c == '+') + return 62; + if (c == '/') + return 63; + if (c == '=') + return 0; /* just non-negative, please */ + return -EINVAL; +} + +int ceph_armor(char *dst, const char *src, const char *end) +{ + int olen = 0; + int line = 0; + + while (src < end) { + unsigned char a, b, c; + + a = *src++; + *dst++ = encode_bits(a >> 2); + if (src < end) { + b = *src++; + *dst++ = encode_bits(((a & 3) << 4) | (b >> 4)); + if (src < end) { + c = *src++; + *dst++ = encode_bits(((b & 15) << 2) | + (c >> 6)); + *dst++ = encode_bits(c & 63); + } else { + *dst++ = encode_bits((b & 15) << 2); + *dst++ = '='; + } + } else { + *dst++ = encode_bits(((a & 3) << 4)); + *dst++ = '='; + *dst++ = '='; + } + olen += 4; + line += 4; + if (line == 64) { + line = 0; + *(dst++) = '\n'; + olen++; + } + } + return olen; +} + +int ceph_unarmor(char *dst, const char *src, const char *end) +{ + int olen = 0; + + while (src < end) { + int a, b, c, d; + + if (src < end && src[0] == '\n') + src++; + if (src + 4 > end) + return -EINVAL; + a = decode_bits(src[0]); + b = decode_bits(src[1]); + c = decode_bits(src[2]); + d = decode_bits(src[3]); + if (a < 0 || b < 0 || c < 0 || d < 0) + return -EINVAL; + + *dst++ = (a << 2) | (b >> 4); + if (src[2] == '=') + return olen + 1; + *dst++ = ((b & 15) << 4) | (c >> 2); + if (src[3] == '=') + return olen + 2; + *dst++ = ((c & 3) << 6) | d; + olen += 3; + src += 4; + } + return olen; +} diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c new file mode 100644 index 000000000000..291ac288e791 --- /dev/null +++ b/fs/ceph/crypto.c @@ -0,0 +1,408 @@ + +#include "ceph_debug.h" + +#include +#include +#include + +#include "crypto.h" +#include "decode.h" + +int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end) +{ + if (*p + sizeof(u16) + sizeof(key->created) + + sizeof(u16) + key->len > end) + return -ERANGE; + ceph_encode_16(p, key->type); + ceph_encode_copy(p, &key->created, sizeof(key->created)); + ceph_encode_16(p, key->len); + ceph_encode_copy(p, key->key, key->len); + return 0; +} + +int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end) +{ + ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad); + key->type = ceph_decode_16(p); + ceph_decode_copy(p, &key->created, sizeof(key->created)); + key->len = ceph_decode_16(p); + ceph_decode_need(p, end, key->len, bad); + key->key = kmalloc(key->len, GFP_NOFS); + if (!key->key) + return -ENOMEM; + ceph_decode_copy(p, key->key, key->len); + return 0; + +bad: + dout("failed to decode crypto key\n"); + return -EINVAL; +} + +int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey) +{ + int inlen = strlen(inkey); + int blen = inlen * 3 / 4; + void *buf, *p; + int ret; + + dout("crypto_key_unarmor %s\n", inkey); + buf = kmalloc(blen, GFP_NOFS); + if (!buf) + return -ENOMEM; + blen = ceph_unarmor(buf, inkey, inkey+inlen); + if (blen < 0) { + kfree(buf); + return blen; + } + + p = buf; + ret = ceph_crypto_key_decode(key, &p, p + blen); + kfree(buf); + if (ret) + return ret; + dout("crypto_key_unarmor key %p type %d len %d\n", key, + key->type, key->len); + return 0; +} + + + +#define AES_KEY_SIZE 16 + +static struct crypto_blkcipher 
*ceph_crypto_alloc_cipher(void) +{ + return crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC); +} + +const u8 *aes_iv = "cephsageyudagreg"; + +int ceph_aes_encrypt(const void *key, int key_len, void *dst, size_t *dst_len, + const void *src, size_t src_len) +{ + struct scatterlist sg_in[2], sg_out[1]; + struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); + struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; + int ret; + void *iv; + int ivsize; + size_t zero_padding = (0x10 - (src_len & 0x0f)); + char pad[16]; + + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + memset(pad, zero_padding, zero_padding); + + *dst_len = src_len + zero_padding; + + crypto_blkcipher_setkey((void *)tfm, key, key_len); + sg_init_table(sg_in, 2); + sg_set_buf(&sg_in[0], src, src_len); + sg_set_buf(&sg_in[1], pad, zero_padding); + sg_init_table(sg_out, 1); + sg_set_buf(sg_out, dst, *dst_len); + iv = crypto_blkcipher_crt(tfm)->iv; + ivsize = crypto_blkcipher_ivsize(tfm); + + memcpy(iv, aes_iv, ivsize); + /* + print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, + key, key_len, 1); + print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1, + src, src_len, 1); + print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, + pad, zero_padding, 1); + */ + ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, + src_len + zero_padding); + crypto_free_blkcipher(tfm); + if (ret < 0) + pr_err("ceph_aes_crypt failed %d\n", ret); + /* + print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, + dst, *dst_len, 1); + */ + return 0; +} + +int ceph_aes_encrypt2(const void *key, int key_len, void *dst, size_t *dst_len, + const void *src1, size_t src1_len, + const void *src2, size_t src2_len) +{ + struct scatterlist sg_in[3], sg_out[1]; + struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); + struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; + int ret; + void *iv; + int ivsize; + size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f)); + char pad[16]; + + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + memset(pad, zero_padding, zero_padding); + + *dst_len = src1_len + src2_len + zero_padding; + + crypto_blkcipher_setkey((void *)tfm, key, key_len); + sg_init_table(sg_in, 3); + sg_set_buf(&sg_in[0], src1, src1_len); + sg_set_buf(&sg_in[1], src2, src2_len); + sg_set_buf(&sg_in[2], pad, zero_padding); + sg_init_table(sg_out, 1); + sg_set_buf(sg_out, dst, *dst_len); + iv = crypto_blkcipher_crt(tfm)->iv; + ivsize = crypto_blkcipher_ivsize(tfm); + + memcpy(iv, aes_iv, ivsize); + /* + print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, + key, key_len, 1); + print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1, + src1, src1_len, 1); + print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1, + src2, src2_len, 1); + print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, + pad, zero_padding, 1); + */ + ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, + src1_len + src2_len + zero_padding); + crypto_free_blkcipher(tfm); + if (ret < 0) + pr_err("ceph_aes_crypt2 failed %d\n", ret); + /* + print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, + dst, *dst_len, 1); + */ + return 0; +} + +int ceph_aes_decrypt(const void *key, int key_len, void *dst, size_t *dst_len, + const void *src, size_t src_len) +{ + struct scatterlist sg_in[1], sg_out[2]; + struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); + struct blkcipher_desc desc = { .tfm = tfm }; + char pad[16]; + void *iv; + int ivsize; + int ret; + int last_byte; + + if (IS_ERR(tfm)) + return 
PTR_ERR(tfm); + + crypto_blkcipher_setkey((void *)tfm, key, key_len); + sg_init_table(sg_in, 1); + sg_init_table(sg_out, 2); + sg_set_buf(sg_in, src, src_len); + sg_set_buf(&sg_out[0], dst, *dst_len); + sg_set_buf(&sg_out[1], pad, sizeof(pad)); + + iv = crypto_blkcipher_crt(tfm)->iv; + ivsize = crypto_blkcipher_ivsize(tfm); + + memcpy(iv, aes_iv, ivsize); + + /* + print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1, + key, key_len, 1); + print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, + src, src_len, 1); + */ + + ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len); + crypto_free_blkcipher(tfm); + if (ret < 0) { + pr_err("ceph_aes_decrypt failed %d\n", ret); + return ret; + } + + if (src_len <= *dst_len) + last_byte = ((char *)dst)[src_len - 1]; + else + last_byte = pad[src_len - *dst_len - 1]; + if (last_byte <= 16 && src_len >= last_byte) { + *dst_len = src_len - last_byte; + } else { + pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n", + last_byte, (int)src_len); + return -EPERM; /* bad padding */ + } + /* + print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1, + dst, *dst_len, 1); + */ + return 0; +} + +int ceph_aes_decrypt2(const void *key, int key_len, + void *dst1, size_t *dst1_len, + void *dst2, size_t *dst2_len, + const void *src, size_t src_len) +{ + struct scatterlist sg_in[1], sg_out[3]; + struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); + struct blkcipher_desc desc = { .tfm = tfm }; + char pad[16]; + void *iv; + int ivsize; + int ret; + int last_byte; + + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + sg_init_table(sg_in, 1); + sg_set_buf(sg_in, src, src_len); + sg_init_table(sg_out, 3); + sg_set_buf(&sg_out[0], dst1, *dst1_len); + sg_set_buf(&sg_out[1], dst2, *dst2_len); + sg_set_buf(&sg_out[2], pad, sizeof(pad)); + + crypto_blkcipher_setkey((void *)tfm, key, key_len); + iv = crypto_blkcipher_crt(tfm)->iv; + ivsize = crypto_blkcipher_ivsize(tfm); + + memcpy(iv, aes_iv, ivsize); + + /* + print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1, + key, key_len, 1); + print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, + src, src_len, 1); + */ + + ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len); + crypto_free_blkcipher(tfm); + if (ret < 0) { + pr_err("ceph_aes_decrypt failed %d\n", ret); + return ret; + } + + if (src_len <= *dst1_len) + last_byte = ((char *)dst1)[src_len - 1]; + else if (src_len <= *dst1_len + *dst2_len) + last_byte = ((char *)dst2)[src_len - *dst1_len - 1]; + else + last_byte = pad[src_len - *dst1_len - *dst2_len - 1]; + if (last_byte <= 16 && src_len >= last_byte) { + src_len -= last_byte; + } else { + pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n", + last_byte, (int)src_len); + return -EPERM; /* bad padding */ + } + + if (src_len < *dst1_len) { + *dst1_len = src_len; + *dst2_len = 0; + } else { + *dst2_len = src_len - *dst1_len; + } + /* + print_hex_dump(KERN_ERR, "dec out1: ", DUMP_PREFIX_NONE, 16, 1, + dst1, *dst1_len, 1); + print_hex_dump(KERN_ERR, "dec out2: ", DUMP_PREFIX_NONE, 16, 1, + dst2, *dst2_len, 1); + */ + + return 0; +} + + +int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len, + const void *src, size_t src_len) +{ + switch (secret->type) { + case CEPH_CRYPTO_NONE: + if (*dst_len < src_len) + return -ERANGE; + memcpy(dst, src, src_len); + *dst_len = src_len; + return 0; + + case CEPH_CRYPTO_AES: + return ceph_aes_decrypt(secret->key, secret->len, dst, + dst_len, src, src_len); + + default: + return -EINVAL; + } +} + +int 
ceph_decrypt2(struct ceph_crypto_key *secret, + void *dst1, size_t *dst1_len, + void *dst2, size_t *dst2_len, + const void *src, size_t src_len) +{ + size_t t; + + switch (secret->type) { + case CEPH_CRYPTO_NONE: + if (*dst1_len + *dst2_len < src_len) + return -ERANGE; + t = min(*dst1_len, src_len); + memcpy(dst1, src, t); + *dst1_len = t; + src += t; + src_len -= t; + if (src_len) { + t = min(*dst2_len, src_len); + memcpy(dst2, src, t); + *dst2_len = t; + } + return 0; + + case CEPH_CRYPTO_AES: + return ceph_aes_decrypt2(secret->key, secret->len, + dst1, dst1_len, dst2, dst2_len, + src, src_len); + + default: + return -EINVAL; + } +} + +int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len, + const void *src, size_t src_len) +{ + switch (secret->type) { + case CEPH_CRYPTO_NONE: + if (*dst_len < src_len) + return -ERANGE; + memcpy(dst, src, src_len); + *dst_len = src_len; + return 0; + + case CEPH_CRYPTO_AES: + return ceph_aes_encrypt(secret->key, secret->len, dst, + dst_len, src, src_len); + + default: + return -EINVAL; + } +} + +int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len, + const void *src1, size_t src1_len, + const void *src2, size_t src2_len) +{ + switch (secret->type) { + case CEPH_CRYPTO_NONE: + if (*dst_len < src1_len + src2_len) + return -ERANGE; + memcpy(dst, src1, src1_len); + memcpy(dst + src1_len, src2, src2_len); + *dst_len = src1_len + src2_len; + return 0; + + case CEPH_CRYPTO_AES: + return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len, + src1, src1_len, src2, src2_len); + + default: + return -EINVAL; + } +} diff --git a/fs/ceph/crypto.h b/fs/ceph/crypto.h new file mode 100644 index 000000000000..40b502e6bd89 --- /dev/null +++ b/fs/ceph/crypto.h @@ -0,0 +1,48 @@ +#ifndef _FS_CEPH_CRYPTO_H +#define _FS_CEPH_CRYPTO_H + +#include "types.h" +#include "buffer.h" + +/* + * cryptographic secret + */ +struct ceph_crypto_key { + int type; + struct ceph_timespec created; + int len; + void *key; +}; + +static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key) +{ + kfree(key->key); +} + +extern int ceph_crypto_key_encode(struct ceph_crypto_key *key, + void **p, void *end); +extern int ceph_crypto_key_decode(struct ceph_crypto_key *key, + void **p, void *end); +extern int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in); + +/* crypto.c */ +extern int ceph_decrypt(struct ceph_crypto_key *secret, + void *dst, size_t *dst_len, + const void *src, size_t src_len); +extern int ceph_encrypt(struct ceph_crypto_key *secret, + void *dst, size_t *dst_len, + const void *src, size_t src_len); +extern int ceph_decrypt2(struct ceph_crypto_key *secret, + void *dst1, size_t *dst1_len, + void *dst2, size_t *dst2_len, + const void *src, size_t src_len); +extern int ceph_encrypt2(struct ceph_crypto_key *secret, + void *dst, size_t *dst_len, + const void *src1, size_t src1_len, + const void *src2, size_t src2_len); + +/* armor.c */ +extern int ceph_armor(char *dst, const void *src, const void *end); +extern int ceph_unarmor(void *dst, const char *src, const char *end); + +#endif -- cgit v1.2.3 From 9bd2e6f8ba71facf1cadb7154a7e0e4d345a6aba Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 2 Feb 2010 16:21:06 -0800 Subject: ceph: allow renewal of auth credentials Add infrastructure to allow the mon_client to periodically renew its auth credentials. Also add a messenger callback that will force such a renewal if a peer rejects our authenticator. 
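The renewal decision itself is small; as a rough standalone sketch (a simplified userspace model with illustrative field names, not the kernel structures), the periodic tick and the new invalidate_authorizer callback both reduce to the same question:

    #include <stdio.h>
    #include <stdbool.h>

    /* toy model of the client's auth state */
    struct auth_state {
        bool have_protocol;   /* negotiated an auth protocol with the mon yet? */
        bool tickets_valid;   /* current credentials neither expired nor invalidated */
    };

    /* true when a (re)authentication request should be sent to the monitor */
    static bool needs_auth_request(const struct auth_state *a)
    {
        if (!a->have_protocol)
            return true;              /* first contact: send the initial hello */
        return !a->tickets_valid;     /* renew expired or rejected credentials */
    }

    int main(void)
    {
        struct auth_state a = { .have_protocol = true, .tickets_valid = false };
        printf("need new auth request: %s\n",
               needs_auth_request(&a) ? "yes" : "no");
        return 0;
    }

A peer rejecting our authorizer marks the corresponding credentials invalid, so the next validation pass sends a fresh request to the monitor rather than retrying with stale tickets.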
Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/auth.c | 61 ++++++++++++++++++++++++++++++++++++---------------- fs/ceph/auth.h | 7 ++++++ fs/ceph/mds_client.c | 13 +++++++++++ fs/ceph/messenger.c | 9 ++++++++ fs/ceph/messenger.h | 1 + fs/ceph/mon_client.c | 55 ++++++++++++++++++++++++++++++++++++++++------ fs/ceph/mon_client.h | 3 +++ fs/ceph/osd_client.c | 12 +++++++++++ fs/ceph/super.c | 12 +++++------ fs/ceph/super.h | 4 ++-- 10 files changed, 144 insertions(+), 33 deletions(-) diff --git a/fs/ceph/auth.c b/fs/ceph/auth.c index 32f2e2a021ab..d5872d4f92bf 100644 --- a/fs/ceph/auth.c +++ b/fs/ceph/auth.c @@ -125,6 +125,30 @@ bad: return -ERANGE; } +int ceph_build_auth_request(struct ceph_auth_client *ac, + void *msg_buf, size_t msg_len) +{ + struct ceph_mon_request_header *monhdr = msg_buf; + void *p = monhdr + 1; + void *end = msg_buf + msg_len; + int ret; + + monhdr->have_version = 0; + monhdr->session_mon = cpu_to_le16(-1); + monhdr->session_mon_tid = 0; + + ceph_encode_32(&p, ac->protocol); + + ret = ac->ops->build_request(ac, p + sizeof(u32), end); + if (ret < 0) { + pr_err("error %d building request\n", ret); + return ret; + } + dout(" built request %d bytes\n", ret); + ceph_encode_32(&p, ret); + return p + ret - msg_buf; +} + /* * Handle auth message from monitor. */ @@ -188,28 +212,13 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac, goto out; } } + + ac->negotiating = false; } ret = ac->ops->handle_reply(ac, result, payload, payload_end); if (ret == -EAGAIN) { - struct ceph_mon_request_header *monhdr = reply_buf; - void *p = reply_buf + 1; - void *end = reply_buf + reply_len; - - monhdr->have_version = 0; - monhdr->session_mon = cpu_to_le16(-1); - monhdr->session_mon_tid = 0; - - ceph_encode_32(&p, ac->protocol); - - ret = ac->ops->build_request(ac, p + sizeof(u32), end); - if (ret < 0) { - pr_err("error %d building request\n", ret); - goto out; - } - dout(" built request %d bytes\n", ret); - ceph_encode_32(&p, ret); - return p + ret - reply_buf; + return ceph_build_auth_request(ac, reply_buf, reply_len); } else if (ret) { pr_err("authentication error %d\n", ret); return ret; @@ -222,4 +231,20 @@ out: return ret; } +int ceph_build_auth(struct ceph_auth_client *ac, + void *msg_buf, size_t msg_len) +{ + if (!ac->protocol) + return ceph_auth_build_hello(ac, msg_buf, msg_len); + BUG_ON(!ac->ops); + if (!ac->ops->is_authenticated(ac)) + return ceph_build_auth_request(ac, msg_buf, msg_len); + return 0; +} +int ceph_auth_is_authenticated(struct ceph_auth_client *ac) +{ + if (!ac->ops) + return 0; + return ac->ops->is_authenticated(ac); +} diff --git a/fs/ceph/auth.h b/fs/ceph/auth.h index 4d8cdf6bb3b6..ca4f57cfb267 100644 --- a/fs/ceph/auth.h +++ b/fs/ceph/auth.h @@ -42,6 +42,8 @@ struct ceph_auth_client_ops { struct ceph_authorizer *a, size_t len); void (*destroy_authorizer)(struct ceph_auth_client *ac, struct ceph_authorizer *a); + void (*invalidate_authorizer)(struct ceph_auth_client *ac, + int peer_type); /* reset when we (re)connect to a monitor */ void (*reset)(struct ceph_auth_client *ac); @@ -74,4 +76,9 @@ extern int ceph_handle_auth_reply(struct ceph_auth_client *ac, void *reply_buf, size_t reply_len); extern int ceph_entity_name_encode(const char *name, void **p, void *end); +extern int ceph_build_auth(struct ceph_auth_client *ac, + void *msg_buf, size_t msg_len); + +extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac); + #endif diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 4e3e8b229e67..aa8506bad42d 100644 --- 
a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2946,12 +2946,25 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len) return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len); } +static int invalidate_authorizer(struct ceph_connection *con) +{ + struct ceph_mds_session *s = con->private; + struct ceph_mds_client *mdsc = s->s_mdsc; + struct ceph_auth_client *ac = mdsc->client->monc.auth; + + if (ac->ops->invalidate_authorizer) + ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); + + return ceph_monc_validate_auth(&mdsc->client->monc); +} + const static struct ceph_connection_operations mds_con_ops = { .get = con_get, .put = con_put, .dispatch = dispatch, .get_authorizer = get_authorizer, .verify_authorizer_reply = verify_authorizer_reply, + .invalidate_authorizer = invalidate_authorizer, .peer_reset = peer_reset, }; diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index e4e8d4439d3a..c4341784ec8f 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -1849,6 +1849,15 @@ static void ceph_fault(struct ceph_connection *con) con->in_msg = NULL; } + /* + * in case we faulted due to authentication, invalidate our + * current tickets so that we can get new ones. + */ + if (con->auth_retry && con->ops->invalidate_authorizer) { + dout("calling invalidate_authorizer()\n"); + con->ops->invalidate_authorizer(con); + } + /* If there are no messages in the queue, place the connection * in a STANDBY state (i.e., don't try to reconnect just yet). */ if (list_empty(&con->out_queue) && !con->out_keepalive_pending) { diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index c26a3d8aa78c..c9735378be3f 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -32,6 +32,7 @@ struct ceph_connection_operations { void **buf, int *len, int *proto, void **reply_buf, int *reply_len, int force_new); int (*verify_authorizer_reply) (struct ceph_connection *con, int len); + int (*invalidate_authorizer)(struct ceph_connection *con); /* protocol version mismatch */ void (*bad_proto) (struct ceph_connection *con); diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index 3f7ae7f73c50..fec41a0eff86 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -29,6 +29,8 @@ const static struct ceph_connection_operations mon_con_ops; +static int __validate_auth(struct ceph_mon_client *monc); + /* * Decode a monmap blob (e.g., during mount). 
*/ @@ -103,6 +105,7 @@ static void __close_session(struct ceph_mon_client *monc) ceph_con_revoke(monc->con, monc->m_auth); ceph_con_close(monc->con); monc->cur_mon = -1; + monc->pending_auth = 0; ceph_auth_reset(monc->auth); } } @@ -334,7 +337,7 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc, out: mutex_unlock(&monc->mutex); - wake_up(&client->mount_wq); + wake_up(&client->auth_wq); } /* @@ -477,6 +480,11 @@ static void delayed_work(struct work_struct *work) __open_session(monc); /* continue hunting */ } else { ceph_con_keepalive(monc->con); + mutex_unlock(&monc->mutex); + + __validate_auth(monc); + + mutex_lock(&monc->mutex); if (monc->auth->ops->is_authenticated(monc->auth)) __send_subscribe(monc); } @@ -557,6 +565,7 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) goto out_pool2; monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, 0, 0, NULL); + monc->pending_auth = 0; if (IS_ERR(monc->m_auth)) { err = PTR_ERR(monc->m_auth); monc->m_auth = NULL; @@ -614,6 +623,15 @@ void ceph_monc_stop(struct ceph_mon_client *monc) kfree(monc->monmap); } +static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len) +{ + monc->pending_auth = 1; + monc->m_auth->front.iov_len = len; + monc->m_auth->hdr.front_len = cpu_to_le32(len); + ceph_msg_get(monc->m_auth); /* keep our ref */ + ceph_con_send(monc->con, monc->m_auth); +} + static void handle_auth_reply(struct ceph_mon_client *monc, struct ceph_msg *msg) @@ -621,18 +639,16 @@ static void handle_auth_reply(struct ceph_mon_client *monc, int ret; mutex_lock(&monc->mutex); + monc->pending_auth = 0; ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base, msg->front.iov_len, monc->m_auth->front.iov_base, monc->m_auth->front_max); if (ret < 0) { - monc->client->mount_err = ret; - wake_up(&monc->client->mount_wq); + monc->client->auth_err = ret; + wake_up(&monc->client->auth_wq); } else if (ret > 0) { - monc->m_auth->front.iov_len = ret; - monc->m_auth->hdr.front_len = cpu_to_le32(ret); - ceph_msg_get(monc->m_auth); /* keep our ref */ - ceph_con_send(monc->con, monc->m_auth); + __send_prepared_auth_request(monc, ret); } else if (monc->auth->ops->is_authenticated(monc->auth)) { dout("authenticated, starting session\n"); @@ -645,6 +661,31 @@ static void handle_auth_reply(struct ceph_mon_client *monc, mutex_unlock(&monc->mutex); } +static int __validate_auth(struct ceph_mon_client *monc) +{ + int ret; + + if (monc->pending_auth) + return 0; + + ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base, + monc->m_auth->front_max); + if (ret <= 0) + return ret; /* either an error, or no need to authenticate */ + __send_prepared_auth_request(monc, ret); + return 0; +} + +int ceph_monc_validate_auth(struct ceph_mon_client *monc) +{ + int ret; + + mutex_lock(&monc->mutex); + ret = __validate_auth(monc); + mutex_unlock(&monc->mutex); + return ret; +} + /* * handle incoming message */ diff --git a/fs/ceph/mon_client.h b/fs/ceph/mon_client.h index c75b53302ecc..5ca8e48d4379 100644 --- a/fs/ceph/mon_client.h +++ b/fs/ceph/mon_client.h @@ -61,6 +61,7 @@ struct ceph_mon_client { struct ceph_auth_client *auth; struct ceph_msg *m_auth; + int pending_auth; bool hunting; int cur_mon; /* last monitor i contacted */ @@ -110,6 +111,8 @@ extern int ceph_monc_do_statfs(struct ceph_mon_client *monc, extern int ceph_monc_open_session(struct ceph_mon_client *monc); +extern int ceph_monc_validate_auth(struct ceph_mon_client *monc); + #endif diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 
944759b3079f..35c8afea13ec 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -1448,6 +1448,17 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len) return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len); } +static int invalidate_authorizer(struct ceph_connection *con) +{ + struct ceph_osd *o = con->private; + struct ceph_osd_client *osdc = o->o_osdc; + struct ceph_auth_client *ac = osdc->client->monc.auth; + + if (ac->ops->invalidate_authorizer) + ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); + + return ceph_monc_validate_auth(&osdc->client->monc); +} const static struct ceph_connection_operations osd_con_ops = { .get = get_osd_con, @@ -1455,6 +1466,7 @@ const static struct ceph_connection_operations osd_con_ops = { .dispatch = dispatch, .get_authorizer = get_authorizer, .verify_authorizer_reply = verify_authorizer_reply, + .invalidate_authorizer = invalidate_authorizer, .alloc_msg = alloc_msg, .fault = osd_reset, }; diff --git a/fs/ceph/super.c b/fs/ceph/super.c index cd81c84e96fc..3a2548951fe6 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -542,7 +542,7 @@ static struct ceph_client *ceph_create_client(struct ceph_mount_args *args) mutex_init(&client->mount_mutex); - init_waitqueue_head(&client->mount_wq); + init_waitqueue_head(&client->auth_wq); client->sb = NULL; client->mount_state = CEPH_MOUNT_MOUNTING; @@ -550,7 +550,7 @@ static struct ceph_client *ceph_create_client(struct ceph_mount_args *args) client->msgr = NULL; - client->mount_err = 0; + client->auth_err = 0; atomic_long_set(&client->writeback_count, 0); err = bdi_init(&client->backing_dev_info); @@ -742,13 +742,13 @@ static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt, /* wait */ dout("mount waiting for mon_map\n"); - err = wait_event_interruptible_timeout(client->mount_wq, /* FIXME */ - have_mon_map(client) || (client->mount_err < 0), + err = wait_event_interruptible_timeout(client->auth_wq, + have_mon_map(client) || (client->auth_err < 0), timeout); if (err == -EINTR || err == -ERESTARTSYS) goto out; - if (client->mount_err < 0) { - err = client->mount_err; + if (client->auth_err < 0) { + err = client->auth_err; goto out; } } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 62d9ae482d72..770f7b507fce 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -123,9 +123,9 @@ struct ceph_client { struct super_block *sb; unsigned long mount_state; - wait_queue_head_t mount_wq; + wait_queue_head_t auth_wq; - int mount_err; + int auth_err; struct ceph_messenger *msgr; /* messenger instance */ struct ceph_mon_client monc; -- cgit v1.2.3 From 07c8739c521cb029d0f3549556aae2d304513978 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 4 Feb 2010 09:42:20 -0800 Subject: ceph: add struct version to auth encoding Inlucde struct version in encoding. This will streamline future protocol changes. 
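As a standalone sketch of the idea (a hypothetical helper, not the kernel encoder, and host-endian for brevity where the real helpers emit little-endian), writing a one-byte struct_v ahead of the payload lets a decoder recognize, and if necessary reject, encodings it does not understand:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* encode: version byte, then a count, then that many 32-bit protocol ids */
    static size_t encode_hello(uint8_t *buf, const uint32_t *protos, uint32_t n)
    {
        uint8_t *p = buf;
        *p++ = 1;                                   /* struct_v */
        memcpy(p, &n, sizeof(n));
        p += sizeof(n);
        memcpy(p, protos, n * sizeof(*protos));
        p += n * sizeof(*protos);
        return (size_t)(p - buf);
    }

    int main(void)
    {
        uint8_t buf[64];
        uint32_t protos[] = { 1, 2 };               /* e.g. none, cephx */
        size_t len = encode_hello(buf, protos, 2);
        printf("encoded %zu bytes, struct_v=%u\n", len, (unsigned)buf[0]);
        return 0;
    }

Later revisions can bump struct_v and append fields without confusing older decoders, which is what makes subsequent protocol changes easier to roll out.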
Signed-off-by: Sage Weil --- fs/ceph/auth.c | 3 +++ fs/ceph/auth_none.c | 1 + 2 files changed, 4 insertions(+) diff --git a/fs/ceph/auth.c b/fs/ceph/auth.c index d5872d4f92bf..b34ce0e41b4c 100644 --- a/fs/ceph/auth.c +++ b/fs/ceph/auth.c @@ -107,8 +107,11 @@ int ceph_auth_build_hello(struct ceph_auth_client *ac, void *buf, size_t len) lenp = p; p += sizeof(u32); + ceph_decode_need(&p, end, 1 + sizeof(u32), bad); + ceph_encode_8(&p, 1); num = ARRAY_SIZE(supported_protocols); ceph_encode_32(&p, num); + ceph_decode_need(&p, end, num * sizeof(u32), bad); for (i = 0; i < num; i++) ceph_encode_32(&p, supported_protocols[i]); diff --git a/fs/ceph/auth_none.c b/fs/ceph/auth_none.c index 631017eb7117..b4ef6f0a6c85 100644 --- a/fs/ceph/auth_none.c +++ b/fs/ceph/auth_none.c @@ -62,6 +62,7 @@ static int ceph_auth_none_create_authorizer( if (!ai->built_authorizer) { p = au->buf; end = p + sizeof(au->buf); + ceph_encode_8(&p, 1); ret = ceph_entity_name_encode(ac->name, &p, end - 8); if (ret < 0) goto bad; -- cgit v1.2.3 From ec0994e48ea2aebf62ff08376227f3a9ccf46262 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 2 Feb 2010 16:25:35 -0800 Subject: ceph: add support for auth_x authentication protocol The auth_x protocol implements support for a kerberos-like mutual authentication infrastructure used by Ceph. We do not simply use vanilla kerberos because of scalability and performance issues when dealing with a large cluster of nodes providing a single logical service. Auth_x provides mutual authentication of client and server and protects against replay and man in the middle attacks. It does not encrypt the full session over the wire, however, so data payload may still be snooped. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/Makefile | 1 + fs/ceph/auth.c | 6 +- fs/ceph/auth_x.c | 656 ++++++++++++++++++++++++++++++++++++++++++++++ fs/ceph/auth_x.h | 49 ++++ fs/ceph/auth_x_protocol.h | 90 +++++++ 5 files changed, 801 insertions(+), 1 deletion(-) create mode 100644 fs/ceph/auth_x.c create mode 100644 fs/ceph/auth_x.h create mode 100644 fs/ceph/auth_x_protocol.h diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile index 85a588e43e7d..6a660e610be8 100644 --- a/fs/ceph/Makefile +++ b/fs/ceph/Makefile @@ -15,6 +15,7 @@ ceph-objs := super.o inode.o dir.o file.o addr.o ioctl.o \ debugfs.o \ auth.o auth_none.o \ crypto.o armor.o \ + auth_x.o \ ceph_fs.o ceph_strings.o ceph_hash.o ceph_frag.o else diff --git a/fs/ceph/auth.c b/fs/ceph/auth.c index b34ce0e41b4c..abb204fea6c7 100644 --- a/fs/ceph/auth.c +++ b/fs/ceph/auth.c @@ -5,6 +5,7 @@ #include "types.h" #include "auth_none.h" +#include "auth_x.h" #include "decode.h" #include "super.h" @@ -14,7 +15,8 @@ * get protocol handler */ static u32 supported_protocols[] = { - CEPH_AUTH_NONE + CEPH_AUTH_NONE, + CEPH_AUTH_CEPHX }; int ceph_auth_init_protocol(struct ceph_auth_client *ac, int protocol) @@ -22,6 +24,8 @@ int ceph_auth_init_protocol(struct ceph_auth_client *ac, int protocol) switch (protocol) { case CEPH_AUTH_NONE: return ceph_auth_none_init(ac); + case CEPH_AUTH_CEPHX: + return ceph_x_init(ac); default: return -ENOENT; } diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c new file mode 100644 index 000000000000..f0318427b6da --- /dev/null +++ b/fs/ceph/auth_x.c @@ -0,0 +1,656 @@ + +#include "ceph_debug.h" + +#include +#include +#include + +#include "auth_x.h" +#include "auth_x_protocol.h" +#include "crypto.h" +#include "auth.h" +#include "decode.h" + +struct kmem_cache *ceph_x_ticketbuf_cachep; + +#define TEMP_TICKET_BUF_LEN 256 + +static void 
ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed); + +static int ceph_x_is_authenticated(struct ceph_auth_client *ac) +{ + struct ceph_x_info *xi = ac->private; + int need; + + ceph_x_validate_tickets(ac, &need); + dout("ceph_x_is_authenticated want=%d need=%d have=%d\n", + ac->want_keys, need, xi->have_keys); + return (ac->want_keys & xi->have_keys) == ac->want_keys; +} + +static int ceph_x_encrypt(struct ceph_crypto_key *secret, + void *ibuf, int ilen, void *obuf, size_t olen) +{ + struct ceph_x_encrypt_header head = { + .struct_v = 1, + .magic = cpu_to_le64(CEPHX_ENC_MAGIC) + }; + size_t len = olen - sizeof(u32); + int ret; + + ret = ceph_encrypt2(secret, obuf + sizeof(u32), &len, + &head, sizeof(head), ibuf, ilen); + if (ret) + return ret; + ceph_encode_32(&obuf, len); + return len + sizeof(u32); +} + +static int ceph_x_decrypt(struct ceph_crypto_key *secret, + void **p, void *end, void *obuf, size_t olen) +{ + struct ceph_x_encrypt_header head; + size_t head_len = sizeof(head); + int len, ret; + + len = ceph_decode_32(p); + if (*p + len > end) + return -EINVAL; + + dout("ceph_x_decrypt len %d\n", len); + ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen, + *p, len); + if (ret) + return ret; + if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC) + return -EPERM; + *p += len; + return olen; +} + +/* + * get existing (or insert new) ticket handler + */ +struct ceph_x_ticket_handler *get_ticket_handler(struct ceph_auth_client *ac, + int service) +{ + struct ceph_x_ticket_handler *th; + struct ceph_x_info *xi = ac->private; + struct rb_node *parent = NULL, **p = &xi->ticket_handlers.rb_node; + + while (*p) { + parent = *p; + th = rb_entry(parent, struct ceph_x_ticket_handler, node); + if (service < th->service) + p = &(*p)->rb_left; + else if (service > th->service) + p = &(*p)->rb_right; + else + return th; + } + + /* add it */ + th = kzalloc(sizeof(*th), GFP_NOFS); + if (!th) + return ERR_PTR(-ENOMEM); + th->service = service; + rb_link_node(&th->node, parent, p); + rb_insert_color(&th->node, &xi->ticket_handlers); + return th; +} + +static void remove_ticket_handler(struct ceph_auth_client *ac, + struct ceph_x_ticket_handler *th) +{ + struct ceph_x_info *xi = ac->private; + + dout("remove_ticket_handler %p %d\n", th, th->service); + rb_erase(&th->node, &xi->ticket_handlers); + ceph_crypto_key_destroy(&th->session_key); + if (th->ticket_blob) + ceph_buffer_put(th->ticket_blob); + kfree(th); +} + +static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, + struct ceph_crypto_key *secret, + void *buf, void *end) +{ + struct ceph_x_info *xi = ac->private; + int num; + void *p = buf; + int ret; + char *dbuf; + char *ticket_buf; + u8 struct_v; + + dbuf = kmem_cache_alloc(ceph_x_ticketbuf_cachep, GFP_NOFS | GFP_ATOMIC); + if (!dbuf) + return -ENOMEM; + + ret = -ENOMEM; + ticket_buf = kmem_cache_alloc(ceph_x_ticketbuf_cachep, + GFP_NOFS | GFP_ATOMIC); + if (!ticket_buf) + goto out_dbuf; + + ceph_decode_need(&p, end, 1 + sizeof(u32), bad); + struct_v = ceph_decode_8(&p); + if (struct_v != 1) + goto bad; + num = ceph_decode_32(&p); + dout("%d tickets\n", num); + while (num--) { + int type; + u8 struct_v; + struct ceph_x_ticket_handler *th; + void *dp, *dend; + int dlen; + char is_enc; + struct timespec validity; + struct ceph_crypto_key old_key; + void *tp, *tpend; + + ceph_decode_need(&p, end, sizeof(u32) + 1, bad); + + type = ceph_decode_32(&p); + dout(" ticket type %d %s\n", type, ceph_entity_type_name(type)); + + struct_v = ceph_decode_8(&p); + 
if (struct_v != 1) + goto bad; + + th = get_ticket_handler(ac, type); + if (IS_ERR(th)) { + ret = PTR_ERR(th); + goto out; + } + + /* blob for me */ + dlen = ceph_x_decrypt(secret, &p, end, dbuf, + TEMP_TICKET_BUF_LEN); + if (dlen <= 0) { + ret = dlen; + goto out; + } + dout(" decrypted %d bytes\n", dlen); + dend = dbuf + dlen; + dp = dbuf; + + struct_v = ceph_decode_8(&dp); + if (struct_v != 1) + goto bad; + + memcpy(&old_key, &th->session_key, sizeof(old_key)); + ret = ceph_crypto_key_decode(&th->session_key, &dp, dend); + if (ret) + goto out; + + ceph_decode_copy(&dp, &th->validity, sizeof(th->validity)); + ceph_decode_timespec(&validity, &th->validity); + th->expires = get_seconds() + validity.tv_sec; + th->renew_after = th->expires - (validity.tv_sec / 4); + dout(" expires=%lu renew_after=%lu\n", th->expires, + th->renew_after); + + /* ticket blob for service */ + ceph_decode_8_safe(&p, end, is_enc, bad); + tp = ticket_buf; + if (is_enc) { + /* encrypted */ + dout(" encrypted ticket\n"); + dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf, + TEMP_TICKET_BUF_LEN); + if (dlen < 0) { + ret = dlen; + goto out; + } + dlen = ceph_decode_32(&tp); + } else { + /* unencrypted */ + ceph_decode_32_safe(&p, end, dlen, bad); + ceph_decode_need(&p, end, dlen, bad); + ceph_decode_copy(&p, ticket_buf, dlen); + } + tpend = tp + dlen; + dout(" ticket blob is %d bytes\n", dlen); + ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); + struct_v = ceph_decode_8(&tp); + th->secret_id = ceph_decode_64(&tp); + ret = ceph_decode_buffer(&th->ticket_blob, &tp, tpend); + if (ret) + goto out; + dout(" got ticket service %d (%s) secret_id %lld len %d\n", + type, ceph_entity_type_name(type), th->secret_id, + (int)th->ticket_blob->vec.iov_len); + xi->have_keys |= th->service; + } + + ret = 0; +out: + kmem_cache_free(ceph_x_ticketbuf_cachep, ticket_buf); +out_dbuf: + kmem_cache_free(ceph_x_ticketbuf_cachep, dbuf); + return ret; + +bad: + ret = -EINVAL; + goto out; +} + +static int ceph_x_build_authorizer(struct ceph_auth_client *ac, + struct ceph_x_ticket_handler *th, + struct ceph_x_authorizer *au) +{ + int len; + struct ceph_x_authorize_a *msg_a; + struct ceph_x_authorize_b msg_b; + void *p, *end; + int ret; + int ticket_blob_len = + (th->ticket_blob ? 
th->ticket_blob->vec.iov_len : 0); + + dout("build_authorizer for %s %p\n", + ceph_entity_type_name(th->service), au); + + len = sizeof(*msg_a) + sizeof(msg_b) + sizeof(u32) + + ticket_blob_len + 16; + dout(" need len %d\n", len); + if (au->buf && au->buf->alloc_len < len) { + ceph_buffer_put(au->buf); + au->buf = NULL; + } + if (!au->buf) { + au->buf = ceph_buffer_new(len, GFP_NOFS); + if (!au->buf) + return -ENOMEM; + } + au->service = th->service; + + msg_a = au->buf->vec.iov_base; + msg_a->struct_v = 1; + msg_a->global_id = cpu_to_le64(ac->global_id); + msg_a->service_id = cpu_to_le32(th->service); + msg_a->ticket_blob.struct_v = 1; + msg_a->ticket_blob.secret_id = cpu_to_le64(th->secret_id); + msg_a->ticket_blob.blob_len = cpu_to_le32(ticket_blob_len); + if (ticket_blob_len) { + memcpy(msg_a->ticket_blob.blob, th->ticket_blob->vec.iov_base, + th->ticket_blob->vec.iov_len); + } + dout(" th %p secret_id %lld %lld\n", th, th->secret_id, + le64_to_cpu(msg_a->ticket_blob.secret_id)); + + p = msg_a + 1; + p += ticket_blob_len; + end = au->buf->vec.iov_base + au->buf->vec.iov_len; + + get_random_bytes(&au->nonce, sizeof(au->nonce)); + msg_b.struct_v = 1; + msg_b.nonce = cpu_to_le64(au->nonce); + ret = ceph_x_encrypt(&th->session_key, &msg_b, sizeof(msg_b), + p, end - p); + if (ret < 0) + goto out_buf; + p += ret; + au->buf->vec.iov_len = p - au->buf->vec.iov_base; + dout(" built authorizer nonce %llx len %d\n", au->nonce, + (int)au->buf->vec.iov_len); + return 0; + +out_buf: + ceph_buffer_put(au->buf); + au->buf = NULL; + return ret; +} + +static int ceph_x_encode_ticket(struct ceph_x_ticket_handler *th, + void **p, void *end) +{ + ceph_decode_need(p, end, 1 + sizeof(u64), bad); + ceph_encode_8(p, 1); + ceph_encode_64(p, th->secret_id); + if (th->ticket_blob) { + const char *buf = th->ticket_blob->vec.iov_base; + u32 len = th->ticket_blob->vec.iov_len; + + ceph_encode_32_safe(p, end, len, bad); + ceph_encode_copy_safe(p, end, buf, len, bad); + } else { + ceph_encode_32_safe(p, end, 0, bad); + } + + return 0; +bad: + return -ERANGE; +} + +static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed) +{ + int want = ac->want_keys; + struct ceph_x_info *xi = ac->private; + int service; + + *pneed = ac->want_keys & ~(xi->have_keys); + + for (service = 1; service <= want; service <<= 1) { + struct ceph_x_ticket_handler *th; + + if (!(ac->want_keys & service)) + continue; + + if (*pneed & service) + continue; + + th = get_ticket_handler(ac, service); + + if (!th) { + *pneed |= service; + continue; + } + + if (get_seconds() >= th->renew_after) + *pneed |= service; + if (get_seconds() >= th->expires) + xi->have_keys &= ~service; + } +} + + +static int ceph_x_build_request(struct ceph_auth_client *ac, + void *buf, void *end) +{ + struct ceph_x_info *xi = ac->private; + int need; + struct ceph_x_request_header *head = buf; + int ret; + struct ceph_x_ticket_handler *th = + get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH); + + ceph_x_validate_tickets(ac, &need); + + dout("build_request want %x have %x need %x\n", + ac->want_keys, xi->have_keys, need); + + if (need & CEPH_ENTITY_TYPE_AUTH) { + struct ceph_x_authenticate *auth = (void *)(head + 1); + void *p = auth + 1; + struct ceph_x_challenge_blob tmp; + char tmp_enc[40]; + u64 *u; + + if (p > end) + return -ERANGE; + + dout(" get_auth_session_key\n"); + head->op = cpu_to_le16(CEPHX_GET_AUTH_SESSION_KEY); + + /* encrypt and hash */ + get_random_bytes(&auth->client_challenge, sizeof(u64)); + tmp.client_challenge = auth->client_challenge; + 
tmp.server_challenge = cpu_to_le64(xi->server_challenge); + ret = ceph_x_encrypt(&xi->secret, &tmp, sizeof(tmp), + tmp_enc, sizeof(tmp_enc)); + if (ret < 0) + return ret; + + auth->struct_v = 1; + auth->key = 0; + for (u = (u64 *)tmp_enc; u + 1 <= (u64 *)(tmp_enc + ret); u++) + auth->key ^= *u; + dout(" server_challenge %llx client_challenge %llx key %llx\n", + xi->server_challenge, le64_to_cpu(auth->client_challenge), + le64_to_cpu(auth->key)); + + /* now encode the old ticket if exists */ + ret = ceph_x_encode_ticket(th, &p, end); + if (ret < 0) + return ret; + + return p - buf; + } + + if (need) { + void *p = head + 1; + struct ceph_x_service_ticket_request *req; + + if (p > end) + return -ERANGE; + head->op = cpu_to_le16(CEPHX_GET_PRINCIPAL_SESSION_KEY); + + BUG_ON(!th); + ret = ceph_x_build_authorizer(ac, th, &xi->auth_authorizer); + if (ret) + return ret; + ceph_encode_copy(&p, xi->auth_authorizer.buf->vec.iov_base, + xi->auth_authorizer.buf->vec.iov_len); + + req = p; + req->keys = cpu_to_le32(need); + p += sizeof(*req); + return p - buf; + } + + return 0; +} + +static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result, + void *buf, void *end) +{ + struct ceph_x_info *xi = ac->private; + struct ceph_x_reply_header *head = buf; + struct ceph_x_ticket_handler *th; + int len = end - buf; + int op; + int ret; + + if (result) + return result; /* XXX hmm? */ + + if (xi->starting) { + /* it's a hello */ + struct ceph_x_server_challenge *sc = buf; + + if (len != sizeof(*sc)) + return -EINVAL; + xi->server_challenge = le64_to_cpu(sc->server_challenge); + dout("handle_reply got server challenge %llx\n", + xi->server_challenge); + xi->starting = false; + xi->have_keys &= ~CEPH_ENTITY_TYPE_AUTH; + return -EAGAIN; + } + + op = le32_to_cpu(head->op); + result = le32_to_cpu(head->result); + dout("handle_reply op %d result %d\n", op, result); + switch (op) { + case CEPHX_GET_AUTH_SESSION_KEY: + /* verify auth key */ + ret = ceph_x_proc_ticket_reply(ac, &xi->secret, + buf + sizeof(*head), end); + break; + + case CEPHX_GET_PRINCIPAL_SESSION_KEY: + th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH); + BUG_ON(!th); + ret = ceph_x_proc_ticket_reply(ac, &th->session_key, + buf + sizeof(*head), end); + break; + + default: + return -EINVAL; + } + if (ret) + return ret; + if (ac->want_keys == xi->have_keys) + return 0; + return -EAGAIN; +} + +static int ceph_x_create_authorizer( + struct ceph_auth_client *ac, int peer_type, + struct ceph_authorizer **a, + void **buf, size_t *len, + void **reply_buf, size_t *reply_len) +{ + struct ceph_x_authorizer *au; + struct ceph_x_ticket_handler *th; + int ret; + + th = get_ticket_handler(ac, peer_type); + if (IS_ERR(th)) + return PTR_ERR(th); + + au = kzalloc(sizeof(*au), GFP_NOFS); + if (!au) + return -ENOMEM; + + ret = ceph_x_build_authorizer(ac, th, au); + if (ret) { + kfree(au); + return ret; + } + + *a = (struct ceph_authorizer *)au; + *buf = au->buf->vec.iov_base; + *len = au->buf->vec.iov_len; + *reply_buf = au->reply_buf; + *reply_len = sizeof(au->reply_buf); + return 0; +} + +static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, + struct ceph_authorizer *a, size_t len) +{ + struct ceph_x_authorizer *au = (void *)a; + struct ceph_x_ticket_handler *th; + int ret = 0; + struct ceph_x_authorize_reply reply; + void *p = au->reply_buf; + void *end = p + sizeof(au->reply_buf); + + th = get_ticket_handler(ac, au->service); + if (!th) + return -EIO; /* hrm! 
*/ + ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply)); + if (ret < 0) + return ret; + if (ret != sizeof(reply)) + return -EPERM; + + if (au->nonce + 1 != le64_to_cpu(reply.nonce_plus_one)) + ret = -EPERM; + else + ret = 0; + dout("verify_authorizer_reply nonce %llx got %llx ret %d\n", + au->nonce, le64_to_cpu(reply.nonce_plus_one), ret); + return ret; +} + +static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac, + struct ceph_authorizer *a) +{ + struct ceph_x_authorizer *au = (void *)a; + + ceph_buffer_put(au->buf); + kfree(au); +} + + +static void ceph_x_reset(struct ceph_auth_client *ac) +{ + struct ceph_x_info *xi = ac->private; + + dout("reset\n"); + xi->starting = true; + xi->server_challenge = 0; +} + +static void ceph_x_destroy(struct ceph_auth_client *ac) +{ + struct ceph_x_info *xi = ac->private; + struct rb_node *p; + + dout("ceph_x_destroy %p\n", ac); + ceph_crypto_key_destroy(&xi->secret); + + while ((p = rb_first(&xi->ticket_handlers)) != NULL) { + struct ceph_x_ticket_handler *th = + rb_entry(p, struct ceph_x_ticket_handler, node); + remove_ticket_handler(ac, th); + } + + kmem_cache_destroy(ceph_x_ticketbuf_cachep); + + kfree(ac->private); + ac->private = NULL; +} + +static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac, + int peer_type) +{ + struct ceph_x_ticket_handler *th; + + th = get_ticket_handler(ac, peer_type); + if (th && !IS_ERR(th)) + remove_ticket_handler(ac, th); +} + + +static const struct ceph_auth_client_ops ceph_x_ops = { + .is_authenticated = ceph_x_is_authenticated, + .build_request = ceph_x_build_request, + .handle_reply = ceph_x_handle_reply, + .create_authorizer = ceph_x_create_authorizer, + .verify_authorizer_reply = ceph_x_verify_authorizer_reply, + .destroy_authorizer = ceph_x_destroy_authorizer, + .invalidate_authorizer = ceph_x_invalidate_authorizer, + .reset = ceph_x_reset, + .destroy = ceph_x_destroy, +}; + + +int ceph_x_init(struct ceph_auth_client *ac) +{ + struct ceph_x_info *xi; + int ret; + + dout("ceph_x_init %p\n", ac); + xi = kzalloc(sizeof(*xi), GFP_NOFS); + if (!xi) + return -ENOMEM; + + ret = -ENOMEM; + ceph_x_ticketbuf_cachep = kmem_cache_create("ceph_x_ticketbuf", + TEMP_TICKET_BUF_LEN, 8, + (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD), + NULL); + if (!ceph_x_ticketbuf_cachep) + goto done_nomem; + ret = -EINVAL; + if (!ac->secret) { + pr_err("no secret set (for auth_x protocol)\n"); + goto done_nomem; + } + + ret = ceph_crypto_key_unarmor(&xi->secret, ac->secret); + if (ret) + goto done_nomem; + + xi->starting = true; + xi->ticket_handlers = RB_ROOT; + + ac->protocol = CEPH_AUTH_CEPHX; + ac->private = xi; + ac->ops = &ceph_x_ops; + return 0; + +done_nomem: + kfree(xi); + if (ceph_x_ticketbuf_cachep) + kmem_cache_destroy(ceph_x_ticketbuf_cachep); + return ret; +} + + diff --git a/fs/ceph/auth_x.h b/fs/ceph/auth_x.h new file mode 100644 index 000000000000..ff6f8180e681 --- /dev/null +++ b/fs/ceph/auth_x.h @@ -0,0 +1,49 @@ +#ifndef _FS_CEPH_AUTH_X_H +#define _FS_CEPH_AUTH_X_H + +#include + +#include "crypto.h" +#include "auth.h" +#include "auth_x_protocol.h" + +/* + * Handle ticket for a single service. 
+ */ +struct ceph_x_ticket_handler { + struct rb_node node; + unsigned service; + + struct ceph_crypto_key session_key; + struct ceph_timespec validity; + + u64 secret_id; + struct ceph_buffer *ticket_blob; + + unsigned long renew_after, expires; +}; + + +struct ceph_x_authorizer { + struct ceph_buffer *buf; + unsigned service; + u64 nonce; + char reply_buf[128]; /* big enough for encrypted blob */ +}; + +struct ceph_x_info { + struct ceph_crypto_key secret; + + bool starting; + u64 server_challenge; + + unsigned have_keys; + struct rb_root ticket_handlers; + + struct ceph_x_authorizer auth_authorizer; +}; + +extern int ceph_x_init(struct ceph_auth_client *ac); + +#endif + diff --git a/fs/ceph/auth_x_protocol.h b/fs/ceph/auth_x_protocol.h new file mode 100644 index 000000000000..671d30576c4f --- /dev/null +++ b/fs/ceph/auth_x_protocol.h @@ -0,0 +1,90 @@ +#ifndef __FS_CEPH_AUTH_X_PROTOCOL +#define __FS_CEPH_AUTH_X_PROTOCOL + +#define CEPHX_GET_AUTH_SESSION_KEY 0x0100 +#define CEPHX_GET_PRINCIPAL_SESSION_KEY 0x0200 +#define CEPHX_GET_ROTATING_KEY 0x0400 + +/* common bits */ +struct ceph_x_ticket_blob { + __u8 struct_v; + __le64 secret_id; + __le32 blob_len; + char blob[]; +} __attribute__ ((packed)); + + +/* common request/reply headers */ +struct ceph_x_request_header { + __le16 op; +} __attribute__ ((packed)); + +struct ceph_x_reply_header { + __le16 op; + __le32 result; +} __attribute__ ((packed)); + + +/* authenticate handshake */ + +/* initial hello (no reply header) */ +struct ceph_x_server_challenge { + __u8 struct_v; + __le64 server_challenge; +} __attribute__ ((packed)); + +struct ceph_x_authenticate { + __u8 struct_v; + __le64 client_challenge; + __le64 key; + /* ticket blob */ +} __attribute__ ((packed)); + +struct ceph_x_service_ticket_request { + __u8 struct_v; + __le32 keys; +} __attribute__ ((packed)); + +struct ceph_x_challenge_blob { + __le64 server_challenge; + __le64 client_challenge; +} __attribute__ ((packed)); + + + +/* authorize handshake */ + +/* + * The authorizer consists of two pieces: + * a - service id, ticket blob + * b - encrypted with session key + */ +struct ceph_x_authorize_a { + __u8 struct_v; + __le64 global_id; + __le32 service_id; + struct ceph_x_ticket_blob ticket_blob; +} __attribute__ ((packed)); + +struct ceph_x_authorize_b { + __u8 struct_v; + __le64 nonce; +} __attribute__ ((packed)); + +struct ceph_x_authorize_reply { + __u8 struct_v; + __le64 nonce_plus_one; +} __attribute__ ((packed)); + + +/* + * encyption bundle + */ +#define CEPHX_ENC_MAGIC 0xff009cad8826aa55ull + +struct ceph_x_encrypt_header { + __u8 struct_v; + __le64 magic; +} __attribute__ ((packed)); + +#endif -- cgit v1.2.3 From b056c8769d1da6a6a80ce780a4b8957b70434a41 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Wed, 3 Feb 2010 10:47:48 -0800 Subject: ceph: remove unused variable Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/addr.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 8065dc92c611..92f482150742 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -505,7 +505,6 @@ static void writepages_finish(struct ceph_osd_request *req, struct ceph_osd_op *op; struct ceph_inode_info *ci = ceph_inode(inode); unsigned wrote; - loff_t offset = req->r_pages[0]->index << PAGE_CACHE_SHIFT; struct page *page; int i; struct ceph_snap_context *snapc = req->r_snapc; -- cgit v1.2.3 From f5a2041bd96c9f05ff10172b9c814c14f247084e Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Wed, 3 Feb 2010 11:00:26 -0800 Subject: ceph: put unused osd 
connections on lru Instead of removing osd connection immediately when the requests list is empty, put the osd connection on an lru. Only if that osd has not been used for more than a specified time, will it be removed. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 76 +++++++++++++++++++++++++++++++++++++++++++++------- fs/ceph/osd_client.h | 4 +++ fs/ceph/super.c | 3 +++ fs/ceph/super.h | 2 ++ 4 files changed, 76 insertions(+), 9 deletions(-) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 35c8afea13ec..7f8a26fdcc2c 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -389,6 +389,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc) atomic_set(&osd->o_ref, 1); osd->o_osdc = osdc; INIT_LIST_HEAD(&osd->o_requests); + INIT_LIST_HEAD(&osd->o_osd_lru); osd->o_incarnation = 1; ceph_con_init(osdc->client->msgr, &osd->o_con); @@ -422,25 +423,56 @@ static void put_osd(struct ceph_osd *osd) /* * remove an osd from our map */ -static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) +static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) { - dout("remove_osd %p\n", osd); + dout("__remove_osd %p\n", osd); BUG_ON(!list_empty(&osd->o_requests)); rb_erase(&osd->o_node, &osdc->osds); + list_del_init(&osd->o_osd_lru); ceph_con_close(&osd->o_con); put_osd(osd); } +static void __move_osd_to_lru(struct ceph_osd_client *osdc, + struct ceph_osd *osd) +{ + dout("__move_osd_to_lru %p\n", osd); + BUG_ON(!list_empty(&osd->o_osd_lru)); + list_add_tail(&osd->o_osd_lru, &osdc->osd_lru); + osd->lru_ttl = jiffies + osdc->client->mount_args->osd_idle_ttl * HZ; +} + +static void __remove_osd_from_lru(struct ceph_osd *osd) +{ + dout("__remove_osd_from_lru %p\n", osd); + if (!list_empty(&osd->o_osd_lru)) + list_del_init(&osd->o_osd_lru); +} + +static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all) +{ + struct ceph_osd *osd, *nosd; + + dout("__remove_old_osds %p\n", osdc); + mutex_lock(&osdc->request_mutex); + list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { + if (!remove_all && time_before(jiffies, osd->lru_ttl)) + break; + __remove_osd(osdc, osd); + } + mutex_unlock(&osdc->request_mutex); +} + /* * reset osd connect */ -static int reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) +static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) { int ret = 0; - dout("reset_osd %p osd%d\n", osd, osd->o_osd); + dout("__reset_osd %p osd%d\n", osd, osd->o_osd); if (list_empty(&osd->o_requests)) { - remove_osd(osdc, osd); + __remove_osd(osdc, osd); } else { ceph_con_close(&osd->o_con); ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]); @@ -533,7 +565,7 @@ static void __unregister_request(struct ceph_osd_client *osdc, list_del_init(&req->r_osd_item); if (list_empty(&req->r_osd->o_requests)) - remove_osd(osdc, req->r_osd); + __move_osd_to_lru(osdc, req->r_osd); req->r_osd = NULL; } @@ -611,7 +643,7 @@ static int __map_osds(struct ceph_osd_client *osdc, if (list_empty(&req->r_osd->o_requests)) { /* try to re-use r_osd if possible */ newosd = get_osd(req->r_osd); - remove_osd(osdc, newosd); + __remove_osd(osdc, newosd); } req->r_osd = NULL; } @@ -636,8 +668,10 @@ static int __map_osds(struct ceph_osd_client *osdc, ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]); } - if (req->r_osd) + if (req->r_osd) { + __remove_osd_from_lru(req->r_osd); list_add(&req->r_osd_item, &req->r_osd->o_requests); + } err = 1; /* osd changed */ out: @@ 
-744,6 +778,23 @@ static void handle_timeout(struct work_struct *work) up_read(&osdc->map_sem); } +static void handle_osds_timeout(struct work_struct *work) +{ + struct ceph_osd_client *osdc = + container_of(work, struct ceph_osd_client, + osds_timeout_work.work); + unsigned long delay = + osdc->client->mount_args->osd_idle_ttl * HZ >> 2; + + dout("osds timeout\n"); + down_read(&osdc->map_sem); + remove_old_osds(osdc, 0); + up_read(&osdc->map_sem); + + schedule_delayed_work(&osdc->osds_timeout_work, + round_jiffies_relative(delay)); +} + /* * handle osd op reply. either call the callback if it is specified, * or do the completion to wake up the waiting thread. @@ -881,7 +932,7 @@ static void kick_requests(struct ceph_osd_client *osdc, ceph_osd_addr(osdc->osdmap, osd->o_osd), sizeof(struct ceph_entity_addr)) != 0) - reset_osd(osdc, osd); + __reset_osd(osdc, osd); } } @@ -1195,9 +1246,14 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) osdc->timeout_tid = 0; osdc->last_tid = 0; osdc->osds = RB_ROOT; + INIT_LIST_HEAD(&osdc->osd_lru); osdc->requests = RB_ROOT; osdc->num_requests = 0; INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout); + INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout); + + schedule_delayed_work(&osdc->osds_timeout_work, + round_jiffies_relative(osdc->client->mount_args->osd_idle_ttl * HZ)); err = -ENOMEM; osdc->req_mempool = mempool_create_kmalloc_pool(10, @@ -1219,10 +1275,12 @@ out: void ceph_osdc_stop(struct ceph_osd_client *osdc) { cancel_delayed_work_sync(&osdc->timeout_work); + cancel_delayed_work_sync(&osdc->osds_timeout_work); if (osdc->osdmap) { ceph_osdmap_destroy(osdc->osdmap); osdc->osdmap = NULL; } + remove_old_osds(osdc, 1); mempool_destroy(osdc->req_mempool); ceph_msgpool_destroy(&osdc->msgpool_op); } diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h index 8d533d9406ff..70f31b61f02c 100644 --- a/fs/ceph/osd_client.h +++ b/fs/ceph/osd_client.h @@ -31,9 +31,11 @@ struct ceph_osd { struct rb_node o_node; struct ceph_connection o_con; struct list_head o_requests; + struct list_head o_osd_lru; struct ceph_authorizer *o_authorizer; void *o_authorizer_buf, *o_authorizer_reply_buf; size_t o_authorizer_buf_len, o_authorizer_reply_buf_len; + unsigned long lru_ttl; }; /* an in-flight request */ @@ -90,11 +92,13 @@ struct ceph_osd_client { struct mutex request_mutex; struct rb_root osds; /* osds */ + struct list_head osd_lru; /* idle osds */ u64 timeout_tid; /* tid of timeout triggering rq */ u64 last_tid; /* tid of last request */ struct rb_root requests; /* pending requests */ int num_requests; struct delayed_work timeout_work; + struct delayed_work osds_timeout_work; #ifdef CONFIG_DEBUG_FS struct dentry *debugfs_file; #endif diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 3a2548951fe6..39aaf29a04a0 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -293,6 +293,7 @@ enum { Opt_rsize, Opt_osdtimeout, Opt_mount_timeout, + Opt_osd_idle_ttl, Opt_caps_wanted_delay_min, Opt_caps_wanted_delay_max, Opt_readdir_max_entries, @@ -322,6 +323,7 @@ static match_table_t arg_tokens = { {Opt_rsize, "rsize=%d"}, {Opt_osdtimeout, "osdtimeout=%d"}, {Opt_mount_timeout, "mount_timeout=%d"}, + {Opt_osd_idle_ttl, "osd_idle_ttl=%d"}, {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"}, {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"}, {Opt_readdir_max_entries, "readdir_max_entries=%d"}, @@ -367,6 +369,7 @@ static struct ceph_mount_args *parse_mount_args(int flags, char *options, args->flags = CEPH_OPT_DEFAULT; 
args->osd_timeout = 5; /* seconds */ args->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */ + args->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */ args->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT; args->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT; args->rsize = CEPH_MOUNT_RSIZE_DEFAULT; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 770f7b507fce..3930fb685f0b 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -53,6 +53,7 @@ struct ceph_mount_args { struct ceph_entity_addr *mon_addr; int flags; int mount_timeout; + int osd_idle_ttl; int caps_wanted_delay_min, caps_wanted_delay_max; struct ceph_fsid fsid; struct ceph_entity_addr my_addr; @@ -71,6 +72,7 @@ struct ceph_mount_args { * defaults */ #define CEPH_MOUNT_TIMEOUT_DEFAULT 60 +#define CEPH_OSD_IDLE_TTL_DEFAULT 60 #define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */ #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) -- cgit v1.2.3 From 02f90c61096ec3ad691e808a4aa7ca5a06e550ec Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 4 Feb 2010 16:18:10 -0800 Subject: ceph: add uid field to ceph_pg_pool Also verify encoding version as we go. Signed-off-by: Sage Weil --- fs/ceph/osdmap.c | 20 ++++++++++++++++++++ fs/ceph/rados.h | 8 ++++++++ 2 files changed, 28 insertions(+) diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index a143c51c2cfb..a6afe3836f7e 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -426,6 +426,11 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) map->pg_temp = RB_ROOT; ceph_decode_16_safe(p, end, version, bad); + if (version > CEPH_OSDMAP_VERSION) { + pr_warning("got unknown v %d > %d of osdmap\n", version, + CEPH_OSDMAP_VERSION); + goto bad; + } ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad); ceph_decode_copy(p, &map->fsid, sizeof(map->fsid)); @@ -447,6 +452,11 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) if (i >= map->num_pools) goto bad; ev = ceph_decode_8(p); /* encoding version */ + if (ev > CEPH_PG_POOL_VERSION) { + pr_warning("got unknown v %d > %d of ceph_pg_pool\n", + ev, CEPH_PG_POOL_VERSION); + goto bad; + } ceph_decode_copy(p, &map->pg_pool[i].v, sizeof(map->pg_pool->v)); calc_pg_masks(&map->pg_pool[i]); @@ -552,6 +562,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, struct rb_node *rbp; ceph_decode_16_safe(p, end, version, bad); + if (version > CEPH_OSDMAP_INC_VERSION) { + pr_warning("got unknown v %d > %d of inc osdmap\n", version, + CEPH_OSDMAP_INC_VERSION); + goto bad; + } ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32), bad); @@ -624,6 +639,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, } ceph_decode_need(p, end, 1 + sizeof(map->pg_pool->v), bad); ev = ceph_decode_8(p); /* encoding version */ + if (ev > CEPH_PG_POOL_VERSION) { + pr_warning("got unknown v %d > %d of ceph_pg_pool\n", + ev, CEPH_PG_POOL_VERSION); + goto bad; + } ceph_decode_copy(p, &map->pg_pool[pool].v, sizeof(map->pg_pool->v)); calc_pg_masks(&map->pg_pool[pool]); diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h index 123fd845459e..1f4c78640541 100644 --- a/fs/ceph/rados.h +++ b/fs/ceph/rados.h @@ -8,6 +8,12 @@ #include "msgr.h" +/* + * osdmap encoding versions + */ +#define CEPH_OSDMAP_INC_VERSION 3 +#define CEPH_OSDMAP_VERSION 3 + /* * fs id */ @@ -80,6 +86,7 @@ struct ceph_pg { */ #define CEPH_PG_TYPE_REP 1 #define CEPH_PG_TYPE_RAID4 2 +#define CEPH_PG_POOL_VERSION 2 struct ceph_pg_pool { __u8 type; /* CEPH_PG_TYPE_* */ __u8 size; /* number of osds in each pg */ @@ -92,6 
+99,7 @@ struct ceph_pg_pool { __le32 snap_epoch; /* epoch of last snap */ __le32 num_snaps; __le32 num_removed_snap_intervals; + __le64 uid; } __attribute__ ((packed)); /* -- cgit v1.2.3 From 972f0d3ab1a15cb5d790dd8c53903066084b28f2 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Thu, 4 Feb 2010 13:41:41 -0800 Subject: ceph: fix short synchronous reads Zeroing of holes was not done correctly: page_off was miscalculated and zeroing the tail didn't not adjust the 'read' value to include the zeroed portion. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/file.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 2d88c805a56c..43bd2f2e51a5 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -395,23 +395,22 @@ static void zero_page_vector_range(int off, int len, struct page **pages) { int i = off >> PAGE_CACHE_SHIFT; + off &= ~PAGE_CACHE_MASK; + dout("zero_page_vector_page %u~%u\n", off, len); - BUG_ON(len < PAGE_CACHE_SIZE); /* leading partial page? */ - if (off & ~PAGE_CACHE_MASK) { + if (off) { + int end = min((int)PAGE_CACHE_SIZE, off + len); dout("zeroing %d %p head from %d\n", i, pages[i], - (int)(off & ~PAGE_CACHE_MASK)); - zero_user_segment(pages[i], off & ~PAGE_CACHE_MASK, - PAGE_CACHE_SIZE); - off += PAGE_CACHE_SIZE; - off &= PAGE_CACHE_MASK; + (int)off); + zero_user_segment(pages[i], off, end); + len -= (end - off); i++; } while (len >= PAGE_CACHE_SIZE) { dout("zeroing %d %p\n", i, pages[i]); zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE); - off += PAGE_CACHE_SIZE; len -= PAGE_CACHE_SIZE; i++; } @@ -437,7 +436,7 @@ static int striped_read(struct inode *inode, struct ceph_client *client = ceph_inode_to_client(inode); struct ceph_inode_info *ci = ceph_inode(inode); u64 pos, this_len; - int page_off = off & ~PAGE_CACHE_SIZE; /* first byte's offset in page */ + int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */ int left, pages_left; int read; struct page **page_pos; @@ -493,6 +492,7 @@ more: dout("zero tail\n"); zero_page_vector_range(page_off + read, len - read, pages); + read = len; goto out; } -- cgit v1.2.3 From 4af6b2257ee0eb8f4bf3b1dc8acb643c0e8a887f Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Tue, 9 Feb 2010 11:02:51 -0800 Subject: ceph: refactor ceph_write_begin, fix ceph_page_mkwrite Originally ceph_page_mkwrite called ceph_write_begin, hoping that the returned locked page would be the page that it was requested to mkwrite. Factored out relevant part of ceph_page_mkwrite and we lock the right page anyway. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/addr.c | 80 +++++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 51 insertions(+), 29 deletions(-) diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 92f482150742..89c5ff3b59d5 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -907,15 +907,13 @@ static int context_is_writeable_or_written(struct inode *inode, * We are only allowed to write into/dirty the page if the page is * clean, or already dirty within the same snap context. 
*/ -static int ceph_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) +static int ceph_update_writeable_page(struct file *file, + loff_t pos, unsigned len, + struct page *page) { struct inode *inode = file->f_dentry->d_inode; struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; - struct page *page; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; loff_t page_off = pos & PAGE_CACHE_MASK; int pos_in_page = pos & ~PAGE_CACHE_MASK; int end_in_page = pos_in_page + len; @@ -923,16 +921,6 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping, struct ceph_snap_context *snapc; int r; - /* get a page*/ -retry: - page = grab_cache_page_write_begin(mapping, index, 0); - if (!page) - return -ENOMEM; - *pagep = page; - - dout("write_begin file %p inode %p page %p %d~%d\n", file, - inode, page, (int)pos, (int)len); - retry_locked: /* writepages currently holds page lock, but if we change that later, */ wait_on_page_writeback(page); @@ -964,7 +952,7 @@ retry_locked: wait_event_interruptible(ci->i_cap_wq, context_is_writeable_or_written(inode, snapc)); ceph_put_snap_context(snapc); - goto retry; + return -EAGAIN; } /* yay, writeable, do it now (without dropping page lock) */ @@ -1021,6 +1009,35 @@ fail_nosnap: return r; } +/* + * We are only allowed to write into/dirty the page if the page is + * clean, or already dirty within the same snap context. + */ +static int ceph_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) +{ + struct inode *inode = file->f_dentry->d_inode; + struct page *page; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + int r; + + do { + /* get a page*/ + page = grab_cache_page_write_begin(mapping, index, 0); + if (!page) + return -ENOMEM; + *pagep = page; + + dout("write_begin file %p inode %p page %p %d~%d\n", file, + inode, page, (int)pos, (int)len); + + r = ceph_update_writeable_page(file, pos, len, page); + } while (r == -EAGAIN); + + return r; +} + /* * we don't do anything in here that simple_write_end doesn't do * except adjust dirty page accounting and drop read lock on @@ -1104,8 +1121,6 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc; loff_t off = page->index << PAGE_CACHE_SHIFT; loff_t size, len; - struct page *locked_page = NULL; - void *fsdata = NULL; int ret; size = i_size_read(inode); @@ -1116,23 +1131,30 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) dout("page_mkwrite %p %llu~%llu page %p idx %lu\n", inode, off, len, page, page->index); - ret = ceph_write_begin(vma->vm_file, inode->i_mapping, off, len, 0, - &locked_page, &fsdata); - WARN_ON(page != locked_page); - if (!ret) { - /* - * doing the following, instead of calling - * ceph_write_end. Note that we keep the - * page locked - */ + + lock_page(page); + + ret = VM_FAULT_NOPAGE; + if ((off > size) || + (page->mapping != inode->i_mapping)) + goto out; + + ret = ceph_update_writeable_page(vma->vm_file, off, len, page); + if (ret == 0) { + /* success. we'll keep the page locked. 
*/ set_page_dirty(page); up_read(&mdsc->snap_rwsem); - page_cache_release(page); ret = VM_FAULT_LOCKED; } else { - ret = VM_FAULT_SIGBUS; + if (ret == -ENOMEM) + ret = VM_FAULT_OOM; + else + ret = VM_FAULT_SIGBUS; } +out: dout("page_mkwrite %p %llu~%llu = %d\n", inode, off, len, ret); + if (ret != VM_FAULT_LOCKED) + unlock_page(page); return ret; } -- cgit v1.2.3 From 3d497d858ae6e5f23a28783030aecc69074e102d Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Tue, 9 Feb 2010 11:08:40 -0800 Subject: ceph: fix truncation when not holding caps A truncation should occur when either we have the specified caps for the file, or (in cases where we are not the only ones referencing the file) when it is mapped or when it is opened. The latter two cases were not handled. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/inode.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index a4f573ab232e..af85f2de2f7c 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -416,9 +416,17 @@ int ceph_fill_file_size(struct inode *inode, int issued, dout("truncate_seq %u -> %u\n", ci->i_truncate_seq, truncate_seq); ci->i_truncate_seq = truncate_seq; - if (issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD| + /* + * If we hold relevant caps, or in the case where we're + * not the only client referencing this file and we + * don't hold those caps, then we need to check whether + * the file is either opened or mmaped + */ + if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD| CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER| - CEPH_CAP_FILE_EXCL)) { + CEPH_CAP_FILE_EXCL)) || + mapping_mapped(inode->i_mapping) || + __ceph_caps_file_wanted(ci)) { ci->i_truncate_pending++; queue_trunc = 1; } -- cgit v1.2.3 From 29065a513aa4c7e4b46b77cbcd25f814a4ca0bfe Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Tue, 9 Feb 2010 11:14:41 -0800 Subject: ceph: sync read/write considers page cache In the cases where we either do a sync read or a write, we need to make sure that everything in the page cache is flushed. In the case of a sync write we invalidate the relevant pages, so that subsequent read/write reflects the new data written. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/file.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 43bd2f2e51a5..bbf1ccf2d56e 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -409,7 +409,7 @@ static void zero_page_vector_range(int off, int len, struct page **pages) i++; } while (len >= PAGE_CACHE_SIZE) { - dout("zeroing %d %p\n", i, pages[i]); + dout("zeroing %d %p len=%d\n", i, pages[i], len); zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE); len -= PAGE_CACHE_SIZE; i++; @@ -542,13 +542,16 @@ static ssize_t ceph_sync_read(struct file *file, char __user *data, * but it will at least behave sensibly when they are * in sequence. 
*/ - filemap_write_and_wait(inode->i_mapping); } else { pages = alloc_page_vector(num_pages); } if (IS_ERR(pages)) return PTR_ERR(pages); + ret = filemap_write_and_wait(inode->i_mapping); + if (ret < 0) + goto done; + ret = striped_read(inode, off, len, pages, num_pages); if (ret >= 0 && (file->f_flags & O_DIRECT) == 0) @@ -556,6 +559,7 @@ static ssize_t ceph_sync_read(struct file *file, char __user *data, if (ret >= 0) *poff = off + ret; +done: if (file->f_flags & O_DIRECT) put_page_vector(pages, num_pages); else @@ -617,6 +621,16 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data, else pos = *offset; + ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left); + if (ret < 0) + return ret; + + ret = invalidate_inode_pages2_range(inode->i_mapping, + pos >> PAGE_CACHE_SHIFT, + (pos + left) >> PAGE_CACHE_SHIFT); + if (ret < 0) + dout("invalidate_inode_pages2_range returned %d\n", ret); + flags = CEPH_OSD_FLAG_ORDERSNAP | CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE; -- cgit v1.2.3 From cbd03635913a86afb7c2cfc0058932956b05b51e Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 9 Feb 2010 13:41:18 -0800 Subject: ceph: cap revocation fixes Try to invalidate pages in ceph_check_caps() if FILE_CACHE is being revoked. If we fail, queue an immediate async invalidate if FILE_CACHE is being revoked. (If it's not being revoked, we just queue the caps for later evaluation later, as per the old behavior.) Signed-off-by: Sage Weil --- fs/ceph/caps.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 847ae64346fe..822f7d3632fe 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1374,12 +1374,13 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags, int file_wanted, used; int took_snap_rwsem = 0; /* true if mdsc->snap_rwsem held */ int drop_session_lock = session ? 0 : 1; - int want, retain, revoking, flushing = 0; + int issued, implemented, want, retain, revoking, flushing = 0; int mds = -1; /* keep track of how far we've gone through i_caps list to avoid an infinite loop on retry */ struct rb_node *p; int tried_invalidate = 0; int delayed = 0, sent = 0, force_requeue = 0, num; + int queue_invalidate = 0; int is_delayed = flags & CHECK_CAPS_NODELAY; /* if we are unmounting, flush any unused caps immediately. */ @@ -1401,6 +1402,8 @@ retry_locked: file_wanted = __ceph_caps_file_wanted(ci); used = __ceph_caps_used(ci); want = file_wanted | used; + issued = __ceph_caps_issued(ci, &implemented); + revoking = implemented & ~issued; retain = want | CEPH_CAP_PIN; if (!mdsc->stopping && inode->i_nlink > 0) { @@ -1419,11 +1422,11 @@ retry_locked: } dout("check_caps %p file_want %s used %s dirty %s flushing %s" - " issued %s retain %s %s%s%s\n", inode, + " issued %s revoking %s retain %s %s%s%s\n", inode, ceph_cap_string(file_wanted), ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps), ceph_cap_string(ci->i_flushing_caps), - ceph_cap_string(__ceph_caps_issued(ci, NULL)), + ceph_cap_string(issued), ceph_cap_string(revoking), ceph_cap_string(retain), (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "", (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "", @@ -1437,7 +1440,8 @@ retry_locked: if ((!is_delayed || mdsc->stopping) && ci->i_wrbuffer_ref == 0 && /* no dirty pages... 
*/ ci->i_rdcache_gen && /* may have cached pages */ - file_wanted == 0 && /* no open files */ + (file_wanted == 0 || /* no open files */ + (revoking & CEPH_CAP_FILE_CACHE)) && /* or revoking cache */ !ci->i_truncate_pending && !tried_invalidate) { u32 invalidating_gen = ci->i_rdcache_gen; @@ -1451,6 +1455,10 @@ retry_locked: /* success. */ ci->i_rdcache_gen = 0; ci->i_rdcache_revoking = 0; + } else if (revoking & CEPH_CAP_FILE_CACHE) { + dout("check_caps queuing invalidate\n"); + queue_invalidate = 1; + ci->i_rdcache_revoking = ci->i_rdcache_gen; } else { dout("check_caps failed to invalidate pages\n"); /* we failed to invalidate pages. check these @@ -1476,7 +1484,7 @@ retry_locked: revoking = cap->implemented & ~cap->issued; if (revoking) - dout("mds%d revoking %s\n", cap->mds, + dout(" mds%d revoking %s\n", cap->mds, ceph_cap_string(revoking)); if (cap == ci->i_auth_cap && @@ -1591,6 +1599,10 @@ ack: spin_unlock(&inode->i_lock); + if (queue_invalidate) + if (ceph_queue_page_invalidation(inode)) + igrab(inode); + if (session && drop_session_lock) mutex_unlock(&session->s_mutex); if (took_snap_rwsem) -- cgit v1.2.3 From 68c283236a1e0772e1a469dd2ffc17afc300b07b Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 9 Feb 2010 13:41:47 -0800 Subject: ceph: do not retain caps that are being revoked Never retain caps in __send_cap() that are being revoked. Signed-off-by: Sage Weil --- fs/ceph/caps.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 822f7d3632fe..7f4841cd3a2b 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1042,10 +1042,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, struct ceph_inode_info *ci = cap->ci; struct inode *inode = &ci->vfs_inode; u64 cap_id = cap->cap_id; - int held = cap->issued | cap->implemented; - int revoking = cap->implemented & ~cap->issued; - int dropping = cap->issued & ~retain; - int keep; + int held, revoking, dropping, keep; u64 seq, issue_seq, mseq, time_warp_seq, follows; u64 size, max_size; struct timespec mtime, atime; @@ -1060,6 +1057,11 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, int i; int ret; + held = cap->issued | cap->implemented; + revoking = cap->implemented & ~cap->issued; + retain &= ~revoking; + dropping = cap->issued & ~retain; + dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n", inode, cap, cap->session, ceph_cap_string(held), ceph_cap_string(held & retain), -- cgit v1.2.3 From 6a026589ba333185c466c906376fe022a27a53f9 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 9 Feb 2010 14:04:02 -0800 Subject: ceph: fix sync read eof check deadlock If a sync read gets a short result from the OSD, it may need to do a getattr to see if it is short due to reaching end-of-file. The getattr was being done while holding a reference to FILE_RD, which can lead to a deadlock if the MDS is revoking that capability bit and can't process the getattr until it does. We fix this by setting a flag if EOF size validation is needed, and doing the getattr in ceph_aio_read, after the RD cap ref is dropped. If the read needs to be continued, we loop and continue traversing the file. 
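In outline, the reworked ceph_aio_read() path becomes the following (a condensed sketch of the diff below; the buffered-read branch, cap-acquisition error handling, and O_DIRECT details are omitted):

    again:
            /* take the FILE_RD cap ref, then do the (possibly short) sync read */
            ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE, &got, -1);
            ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
            ceph_put_cap_refs(ci, got);     /* RD ref dropped before any getattr */

            if (checkeof && ret >= 0) {
                    int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

                    /* short read, but not yet at EOF: we hit a hole, keep going */
                    if (statret == 0 && *ppos < inode->i_size) {
                            read += ret;
                            base += ret;
                            len -= ret;
                            checkeof = 0;
                            goto again;
                    }
            }
            return ret < 0 ? ret : ret + read;

The getattr now happens with no cap reference held, so an MDS revoking FILE_RD can make progress and the deadlock is avoided.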
Signed-off-by: Sage Weil --- fs/ceph/file.c | 39 ++++++++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/fs/ceph/file.c b/fs/ceph/file.c index bbf1ccf2d56e..2c4ae4441cab 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -431,7 +431,8 @@ static void zero_page_vector_range(int off, int len, struct page **pages) */ static int striped_read(struct inode *inode, u64 off, u64 len, - struct page **pages, int num_pages) + struct page **pages, int num_pages, + int *checkeof) { struct ceph_client *client = ceph_inode_to_client(inode); struct ceph_inode_info *ci = ceph_inode(inode); @@ -497,15 +498,7 @@ more: } /* check i_size */ - ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE); - if (ret < 0) - goto out; - - /* hit EOF? */ - if (pos >= inode->i_size) - goto out; - - goto more; + *checkeof = 1; } out: @@ -522,7 +515,7 @@ out: * If the read spans object boundary, just do multiple reads. */ static ssize_t ceph_sync_read(struct file *file, char __user *data, - unsigned len, loff_t *poff) + unsigned len, loff_t *poff, int *checkeof) { struct inode *inode = file->f_dentry->d_inode; struct page **pages; @@ -552,7 +545,7 @@ static ssize_t ceph_sync_read(struct file *file, char __user *data, if (ret < 0) goto done; - ret = striped_read(inode, off, len, pages, num_pages); + ret = striped_read(inode, off, len, pages, num_pages, checkeof); if (ret >= 0 && (file->f_flags & O_DIRECT) == 0) ret = copy_page_vector_to_user(pages, data, off, ret); @@ -746,11 +739,14 @@ static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov, size_t len = iov->iov_len; struct inode *inode = filp->f_dentry->d_inode; struct ceph_inode_info *ci = ceph_inode(inode); + void *base = iov->iov_base; ssize_t ret; int got = 0; + int checkeof = 0, read = 0; dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", inode, ceph_vinop(inode), pos, (unsigned)len, inode); +again: __ceph_do_pending_vmtruncate(inode); ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE, &got, -1); @@ -764,7 +760,7 @@ static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov, (iocb->ki_filp->f_flags & O_DIRECT) || (inode->i_sb->s_flags & MS_SYNCHRONOUS)) /* hmm, this isn't really async... */ - ret = ceph_sync_read(filp, iov->iov_base, len, ppos); + ret = ceph_sync_read(filp, base, len, ppos, &checkeof); else ret = generic_file_aio_read(iocb, iov, nr_segs, pos); @@ -772,6 +768,23 @@ out: dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); ceph_put_cap_refs(ci, got); + + if (checkeof && ret >= 0) { + int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE); + + /* hit EOF or hole? */ + if (statret == 0 && *ppos < inode->i_size) { + dout("aio_read sync_read hit hole, reading more\n"); + read += ret; + base += ret; + len -= ret; + checkeof = 0; + goto again; + } + } + if (ret >= 0) + ret += read; + return ret; } -- cgit v1.2.3 From 3c6f6b79a64db7f1c7abf09d693db3b0066784fb Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 9 Feb 2010 15:24:44 -0800 Subject: ceph: cleanup async writeback, truncation, invalidate helpers Grab inode ref in helper. Make work functions static, with consistent naming. 
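The three queue helpers introduced below share one shape; a simplified sketch of the pattern (the real helpers differ only in the workqueue used, the work item, and the dout() tracing):

    void ceph_queue_writeback(struct inode *inode)
    {
            /* take an inode reference only if the work was actually queued;
             * the work function drops it with iput() once it has run */
            if (queue_work(ceph_inode_to_client(inode)->wb_wq,
                           &ceph_inode(inode)->i_wb_work))
                    igrab(inode);
    }

Putting the igrab() inside the helper keeps the reference counting in one place instead of at every call site.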
Signed-off-by: Sage Weil --- fs/ceph/addr.c | 3 +-- fs/ceph/caps.c | 25 ++++++++--------------- fs/ceph/inode.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++--------- fs/ceph/super.h | 19 ++++-------------- 4 files changed, 65 insertions(+), 43 deletions(-) diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 89c5ff3b59d5..71f5ad1c1e26 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -947,8 +947,7 @@ retry_locked: */ snapc = ceph_get_snap_context((void *)page->private); unlock_page(page); - if (ceph_queue_writeback(inode)) - igrab(inode); + ceph_queue_writeback(inode); wait_event_interruptible(ci->i_cap_wq, context_is_writeable_or_written(inode, snapc)); ceph_put_snap_context(snapc); diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 7f4841cd3a2b..68ee78109224 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1602,8 +1602,7 @@ ack: spin_unlock(&inode->i_lock); if (queue_invalidate) - if (ceph_queue_page_invalidation(inode)) - igrab(inode); + ceph_queue_invalidate(inode); if (session && drop_session_lock) mutex_unlock(&session->s_mutex); @@ -2178,7 +2177,7 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, int wake = 0; int writeback = 0; int revoked_rdcache = 0; - int invalidate_async = 0; + int queue_invalidate = 0; int tried_invalidate = 0; int ret; @@ -2205,7 +2204,7 @@ restart: /* there were locked pages.. invalidate later in a separate thread. */ if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { - invalidate_async = 1; + queue_invalidate = 1; ci->i_rdcache_revoking = ci->i_rdcache_gen; } } else { @@ -2319,21 +2318,15 @@ restart: } spin_unlock(&inode->i_lock); - if (writeback) { + if (writeback) /* * queue inode for writeback: we can't actually call * filemap_write_and_wait, etc. from message handler * context. 
*/ - dout("queueing %p for writeback\n", inode); - if (ceph_queue_writeback(inode)) - igrab(inode); - } - if (invalidate_async) { - dout("queueing %p for page invalidation\n", inode); - if (ceph_queue_page_invalidation(inode)) - igrab(inode); - } + ceph_queue_writeback(inode); + if (queue_invalidate) + ceph_queue_invalidate(inode); if (wake) wake_up(&ci->i_cap_wq); return reply; @@ -2479,9 +2472,7 @@ static void handle_cap_trunc(struct inode *inode, spin_unlock(&inode->i_lock); if (queue_trunc) - if (queue_work(ceph_client(inode->i_sb)->trunc_wq, - &ci->i_vmtruncate_work)) - igrab(inode); + ceph_queue_vmtruncate(inode); } /* diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index af85f2de2f7c..58bdff09c2c1 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -28,7 +28,9 @@ static const struct inode_operations ceph_symlink_iops; -static void ceph_inode_invalidate_pages(struct work_struct *work); +static void ceph_invalidate_work(struct work_struct *work); +static void ceph_writeback_work(struct work_struct *work); +static void ceph_vmtruncate_work(struct work_struct *work); /* * find or create an inode, given the ceph ino number @@ -357,8 +359,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb) INIT_LIST_HEAD(&ci->i_snap_realm_item); INIT_LIST_HEAD(&ci->i_snap_flush_item); - INIT_WORK(&ci->i_wb_work, ceph_inode_writeback); - INIT_WORK(&ci->i_pg_inv_work, ceph_inode_invalidate_pages); + INIT_WORK(&ci->i_wb_work, ceph_writeback_work); + INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work); INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work); @@ -675,9 +677,7 @@ no_change: /* queue truncate if we saw i_size decrease */ if (queue_trunc) - if (queue_work(ceph_client(inode->i_sb)->trunc_wq, - &ci->i_vmtruncate_work)) - igrab(inode); + ceph_queue_vmtruncate(inode); /* populate frag tree */ /* FIXME: move me up, if/when version reflects fragtree changes */ @@ -1243,7 +1243,18 @@ int ceph_inode_set_size(struct inode *inode, loff_t size) * Write back inode data in a worker thread. (This can't be done * in the message handler context.) */ -void ceph_inode_writeback(struct work_struct *work) +void ceph_queue_writeback(struct inode *inode) +{ + if (queue_work(ceph_inode_to_client(inode)->wb_wq, + &ceph_inode(inode)->i_wb_work)) { + dout("ceph_queue_invalidate %p\n", inode); + igrab(inode); + } else { + dout("ceph_queue_invalidate %p failed\n", inode); + } +} + +static void ceph_writeback_work(struct work_struct *work) { struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, i_wb_work); @@ -1254,11 +1265,25 @@ void ceph_inode_writeback(struct work_struct *work) iput(inode); } +/* + * queue an async invalidation + */ +void ceph_queue_invalidate(struct inode *inode) +{ + if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, + &ceph_inode(inode)->i_pg_inv_work)) { + dout("ceph_queue_invalidate %p\n", inode); + igrab(inode); + } else { + dout("ceph_queue_invalidate %p failed\n", inode); + } +} + /* * Invalidate inode pages in a worker thread. (This can't be done * in the message handler context.) */ -static void ceph_inode_invalidate_pages(struct work_struct *work) +static void ceph_invalidate_work(struct work_struct *work) { struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, i_pg_inv_work); @@ -1307,7 +1332,7 @@ out: * * We also truncate in a separate thread as well. 
*/ -void ceph_vmtruncate_work(struct work_struct *work) +static void ceph_vmtruncate_work(struct work_struct *work) { struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, i_vmtruncate_work); @@ -1320,6 +1345,24 @@ void ceph_vmtruncate_work(struct work_struct *work) iput(inode); } +/* + * Queue an async vmtruncate. If we fail to queue work, we will handle + * the truncation the next time we call __ceph_do_pending_vmtruncate. + */ +void ceph_queue_vmtruncate(struct inode *inode) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + + if (queue_work(ceph_client(inode->i_sb)->trunc_wq, + &ci->i_vmtruncate_work)) { + dout("ceph_queue_vmtruncate %p\n", inode); + igrab(inode); + } else { + dout("ceph_queue_vmtruncate %p failed, pending=%d\n", + inode, ci->i_truncate_pending); + } +} + /* * called with i_mutex held. * diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 3930fb685f0b..b2adfccbab98 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -573,18 +573,6 @@ static inline struct ceph_client *ceph_sb_to_client(struct super_block *sb) return (struct ceph_client *)sb->s_fs_info; } -static inline int ceph_queue_writeback(struct inode *inode) -{ - return queue_work(ceph_inode_to_client(inode)->wb_wq, - &ceph_inode(inode)->i_wb_work); -} - -static inline int ceph_queue_page_invalidation(struct inode *inode) -{ - return queue_work(ceph_inode_to_client(inode)->pg_inv_wq, - &ceph_inode(inode)->i_pg_inv_work); -} - /* * we keep buffered readdir results attached to file->private_data @@ -772,10 +760,11 @@ extern int ceph_readdir_prepopulate(struct ceph_mds_request *req, extern int ceph_inode_holds_cap(struct inode *inode, int mask); extern int ceph_inode_set_size(struct inode *inode, loff_t size); -extern void ceph_inode_writeback(struct work_struct *work); -extern void ceph_vmtruncate_work(struct work_struct *work); extern void __ceph_do_pending_vmtruncate(struct inode *inode); -extern void __ceph_queue_vmtruncate(struct inode *inode); +extern void ceph_queue_vmtruncate(struct inode *inode); + +extern void ceph_queue_invalidate(struct inode *inode); +extern void ceph_queue_writeback(struct inode *inode); extern int ceph_do_getattr(struct inode *inode, int mask); extern int ceph_permission(struct inode *inode, int mask); -- cgit v1.2.3 From 0840d8af3e6e40bcd5f2366698eb2755f88acfea Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 9 Feb 2010 15:44:16 -0800 Subject: ceph: invalidate pages even if truncate is pending There is no reason not to invalidate pages when a truncate is pending. Both throw out page cache pages. Signed-off-by: Sage Weil --- fs/ceph/caps.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 68ee78109224..20b28dc0c97c 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1444,7 +1444,6 @@ retry_locked: ci->i_rdcache_gen && /* may have cached pages */ (file_wanted == 0 || /* no open files */ (revoking & CEPH_CAP_FILE_CACHE)) && /* or revoking cache */ - !ci->i_truncate_pending && !tried_invalidate) { u32 invalidating_gen = ci->i_rdcache_gen; int ret; -- cgit v1.2.3 From 8031049147c58d9d8b6226c3ac31a9d72d053e25 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 9 Feb 2010 16:43:11 -0800 Subject: ceph: remove bogus invalidate_mapping_pages We were invalidating mapping pages when dropping FILE_CACHE in __send_cap(). But ceph_check_caps attempts to invalidate already, and also checks for success, so we should never get to this point. 
Signed-off-by: Sage Weil --- fs/ceph/caps.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 20b28dc0c97c..ab9b571dda11 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1137,12 +1137,6 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, spin_unlock(&inode->i_lock); - if (dropping & CEPH_CAP_FILE_CACHE) { - /* invalidate what we can */ - dout("invalidating pages on %p\n", inode); - invalidate_mapping_pages(&inode->i_data, 0, -1); - } - ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id, op, keep, want, flushing, seq, flush_tid, issue_seq, mseq, size, max_size, &mtime, &atime, time_warp_seq, -- cgit v1.2.3 From 6c5d1a49e5e88ee831117f4b2375829933ad15da Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Sat, 13 Feb 2010 20:29:31 -0800 Subject: ceph: fix msgr to keep sent messages until acked The test was backwards from commit b3d1dbbd: keep the message if the connection _isn't_ lossy. This allows the client to continue when the TCP connection drops for some reason (network glitch) but both ends survive. Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index c4341784ec8f..44bdaf439924 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -463,11 +463,11 @@ static void prepare_write_message(struct ceph_connection *con) struct ceph_msg, list_head); con->out_msg = m; if (test_bit(LOSSYTX, &con->state)) { + list_del_init(&m->list_head); + } else { /* put message on sent list */ ceph_msg_get(m); list_move_tail(&m->list_head, &con->out_sent); - } else { - list_del_init(&m->list_head); } m->hdr.seq = cpu_to_le64(++con->out_seq); -- cgit v1.2.3 From 153a008bf7915ea9127341409170cb197d111282 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 15 Feb 2010 12:11:51 -0800 Subject: ceph: reset osd connections after fault A single osd connection fault (e.g. tcp disconnect) wasn't reopening the connection, which causes all current and future requests for that osd to hang. Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 7f8a26fdcc2c..fa0f73703954 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -369,7 +369,6 @@ static void osd_reset(struct ceph_connection *con) return; dout("osd_reset osd%d\n", osd->o_osd); osdc = osd->o_osdc; - osd->o_incarnation++; down_read(&osdc->map_sem); kick_requests(osdc, osd); up_read(&osdc->map_sem); @@ -921,7 +920,9 @@ static void kick_requests(struct ceph_osd_client *osdc, dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1); mutex_lock(&osdc->request_mutex); - if (!kickosd) { + if (kickosd) { + __reset_osd(osdc, kickosd); + } else { for (p = rb_first(&osdc->osds); p; p = n) { struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node); -- cgit v1.2.3 From e2663ab60de59d20fa33da3528f6d5359f8eb003 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 16 Feb 2010 22:01:03 -0800 Subject: ceph: allow connection to be reopened by fault callback Fix the messenger to allow a ceph_con_open() during the fault callback. Previously the work wasn't getting queued on the connection because the fault path avoids requeued work (normally spurious). Loop on reopening by checking for the OPENING state bit. This fixes OSD reconnects when a TCP connection drops. 
Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 44bdaf439924..acf383f6a9cd 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -1808,7 +1808,7 @@ done: clear_bit(BUSY, &con->state); dout("con->state=%lu\n", con->state); if (test_bit(QUEUED, &con->state)) { - if (!backoff) { + if (!backoff || test_bit(OPENING, &con->state)) { dout("con_work %p QUEUED reset, looping\n", con); goto more; } -- cgit v1.2.3 From 91e45ce38946a8efa21fefbc65d023ca3c0b434f Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 15 Feb 2010 12:05:09 -0800 Subject: ceph: cancel delayed work when closing connection This ensures that if/when we reopen the connection, we can requeue work on the connection immediately, without waiting for an old timer to expire. Queue new delayed work inside con->mutex to avoid any race. This fixes problems with clients failing to reconnect to the MDS due to the client_reconnect message arriving too late (due to waiting for an old delayed work timeout to expire). Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index acf383f6a9cd..ca2ad0e5bb28 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -344,6 +344,7 @@ void ceph_con_close(struct ceph_connection *con) clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */ mutex_lock(&con->mutex); reset_connection(con); + cancel_delayed_work(&con->work); mutex_unlock(&con->mutex); queue_con(con); } @@ -1841,6 +1842,8 @@ static void ceph_fault(struct ceph_connection *con) clear_bit(BUSY, &con->state); /* to avoid an improbable race */ mutex_lock(&con->mutex); + if (test_bit(CLOSED, &con->state)) + goto out_unlock; con_close_socket(con); @@ -1876,8 +1879,6 @@ static void ceph_fault(struct ceph_connection *con) else if (con->delay < MAX_DELAY_INTERVAL) con->delay *= 2; - mutex_unlock(&con->mutex); - /* explicitly schedule work to try to reconnect again later. */ dout("fault queueing %p delay %lu\n", con, con->delay); con->ops->get(con); @@ -1885,6 +1886,8 @@ static void ceph_fault(struct ceph_connection *con) round_jiffies_relative(con->delay)) == 0) con->ops->put(con); +out_unlock: + mutex_unlock(&con->mutex); out: if (con->ops->fault) con->ops->fault(con); -- cgit v1.2.3 From 44ca18f2682eb1cfbed153849adedb79e3e19790 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 15 Feb 2010 12:08:46 -0800 Subject: ceph: use rbtree for mds requests The rbtree is a more appropriate data structure than a radix_tree. It avoids extra memory usage and simplifies the code. It also fixes a bug where the debugfs 'mdsc' file wasn't including the most recent mds request. 
Signed-off-by: Sage Weil --- fs/ceph/debugfs.c | 13 ++--- fs/ceph/mds_client.c | 149 +++++++++++++++++++++++++++++++-------------------- fs/ceph/mds_client.h | 4 +- 3 files changed, 97 insertions(+), 69 deletions(-) diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index fba44b2a6086..cd5dd805e4be 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -142,21 +142,16 @@ static int monc_show(struct seq_file *s, void *p) static int mdsc_show(struct seq_file *s, void *p) { struct ceph_client *client = s->private; - struct ceph_mds_request *req; - u64 nexttid = 0; - int got; struct ceph_mds_client *mdsc = &client->mdsc; + struct ceph_mds_request *req; + struct rb_node *rp; int pathlen; u64 pathbase; char *path; mutex_lock(&mdsc->mutex); - while (nexttid < mdsc->last_tid) { - got = radix_tree_gang_lookup(&mdsc->request_tree, - (void **)&req, nexttid, 1); - if (got == 0) - break; - nexttid = req->r_tid + 1; + for (rp = rb_first(&mdsc->request_tree); rp; rp = rb_next(rp)) { + req = rb_entry(rp, struct ceph_mds_request, r_node); if (req->r_request) seq_printf(s, "%lld\tmds%d\t", req->r_tid, req->r_mds); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index aa8506bad42d..81840d6b68a4 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -255,6 +255,7 @@ static const char *session_state_name(int s) case CEPH_MDS_SESSION_OPEN: return "open"; case CEPH_MDS_SESSION_HUNG: return "hung"; case CEPH_MDS_SESSION_CLOSING: return "closing"; + case CEPH_MDS_SESSION_RESTARTING: return "restarting"; case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting"; default: return "???"; } @@ -448,10 +449,42 @@ static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc, u64 tid) { struct ceph_mds_request *req; - req = radix_tree_lookup(&mdsc->request_tree, tid); - if (req) - ceph_mdsc_get_request(req); - return req; + struct rb_node *n = mdsc->request_tree.rb_node; + + while (n) { + req = rb_entry(n, struct ceph_mds_request, r_node); + if (tid < req->r_tid) + n = n->rb_left; + else if (tid > req->r_tid) + n = n->rb_right; + else { + ceph_mdsc_get_request(req); + return req; + } + } + return NULL; +} + +static void __insert_request(struct ceph_mds_client *mdsc, + struct ceph_mds_request *new) +{ + struct rb_node **p = &mdsc->request_tree.rb_node; + struct rb_node *parent = NULL; + struct ceph_mds_request *req = NULL; + + while (*p) { + parent = *p; + req = rb_entry(parent, struct ceph_mds_request, r_node); + if (new->r_tid < req->r_tid) + p = &(*p)->rb_left; + else if (new->r_tid > req->r_tid) + p = &(*p)->rb_right; + else + BUG(); + } + + rb_link_node(&new->r_node, parent, p); + rb_insert_color(&new->r_node, &mdsc->request_tree); } /* @@ -469,7 +502,7 @@ static void __register_request(struct ceph_mds_client *mdsc, ceph_reserve_caps(&req->r_caps_reservation, req->r_num_caps); dout("__register_request %p tid %lld\n", req, req->r_tid); ceph_mdsc_get_request(req); - radix_tree_insert(&mdsc->request_tree, req->r_tid, (void *)req); + __insert_request(mdsc, req); if (dir) { struct ceph_inode_info *ci = ceph_inode(dir); @@ -485,7 +518,7 @@ static void __unregister_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req) { dout("__unregister_request %p tid %lld\n", req, req->r_tid); - radix_tree_delete(&mdsc->request_tree, req->r_tid); + rb_erase(&req->r_node, &mdsc->request_tree); ceph_mdsc_put_request(req); if (req->r_unsafe_dir) { @@ -1115,17 +1148,25 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) } /* - * return oldest (lowest) tid in request 
tree, 0 if none. + * return oldest (lowest) request, tid in request tree, 0 if none. * * called under mdsc->mutex. */ +static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc) +{ + if (RB_EMPTY_ROOT(&mdsc->request_tree)) + return NULL; + return rb_entry(rb_first(&mdsc->request_tree), + struct ceph_mds_request, r_node); +} + static u64 __get_oldest_tid(struct ceph_mds_client *mdsc) { - struct ceph_mds_request *first; - if (radix_tree_gang_lookup(&mdsc->request_tree, - (void **)&first, 0, 1) <= 0) - return 0; - return first->r_tid; + struct ceph_mds_request *req = __get_oldest_req(mdsc); + + if (req) + return req->r_tid; + return 0; } /* @@ -1540,26 +1581,19 @@ static void __wake_requests(struct ceph_mds_client *mdsc, */ static void kick_requests(struct ceph_mds_client *mdsc, int mds, int all) { - struct ceph_mds_request *reqs[10]; - u64 nexttid = 0; - int i, got; + struct ceph_mds_request *req; + struct rb_node *p; dout("kick_requests mds%d\n", mds); - while (nexttid <= mdsc->last_tid) { - got = radix_tree_gang_lookup(&mdsc->request_tree, - (void **)&reqs, nexttid, 10); - if (got == 0) - break; - nexttid = reqs[got-1]->r_tid + 1; - for (i = 0; i < got; i++) { - if (reqs[i]->r_got_unsafe) - continue; - if (reqs[i]->r_session && - reqs[i]->r_session->s_mds == mds) { - dout(" kicking tid %llu\n", reqs[i]->r_tid); - put_request_session(reqs[i]); - __do_request(mdsc, reqs[i]); - } + for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) { + req = rb_entry(p, struct ceph_mds_request, r_node); + if (req->r_got_unsafe) + continue; + if (req->r_session && + req->r_session->s_mds == mds) { + dout(" kicking tid %llu\n", req->r_tid); + put_request_session(req); + __do_request(mdsc, req); } } } @@ -1748,7 +1782,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) list_del_init(&req->r_unsafe_item); /* last unsafe request during umount? 
*/ - if (mdsc->stopping && !__get_oldest_tid(mdsc)) + if (mdsc->stopping && !__get_oldest_req(mdsc)) complete(&mdsc->safe_umount_waiters); mutex_unlock(&mdsc->mutex); goto out; @@ -2573,7 +2607,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client) INIT_LIST_HEAD(&mdsc->snap_empty); spin_lock_init(&mdsc->snap_empty_lock); mdsc->last_tid = 0; - INIT_RADIX_TREE(&mdsc->request_tree, GFP_NOFS); + mdsc->request_tree = RB_ROOT; INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work); mdsc->last_renew_caps = jiffies; INIT_LIST_HEAD(&mdsc->cap_delay_list); @@ -2600,20 +2634,19 @@ static void wait_requests(struct ceph_mds_client *mdsc) struct ceph_client *client = mdsc->client; mutex_lock(&mdsc->mutex); - if (__get_oldest_tid(mdsc)) { + if (__get_oldest_req(mdsc)) { mutex_unlock(&mdsc->mutex); + dout("wait_requests waiting for requests\n"); wait_for_completion_timeout(&mdsc->safe_umount_waiters, client->mount_args->mount_timeout * HZ); - mutex_lock(&mdsc->mutex); /* tear down remaining requests */ - while (radix_tree_gang_lookup(&mdsc->request_tree, - (void **)&req, 0, 1)) { + mutex_lock(&mdsc->mutex); + while ((req = __get_oldest_req(mdsc))) { dout("wait_requests timed out on tid %llu\n", req->r_tid); - radix_tree_delete(&mdsc->request_tree, req->r_tid); - ceph_mdsc_put_request(req); + __unregister_request(mdsc, req); } } mutex_unlock(&mdsc->mutex); @@ -2639,31 +2672,29 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) */ static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid) { - struct ceph_mds_request *req; - u64 next_tid = 0; - int got; + struct ceph_mds_request *req = NULL; + struct rb_node *n; mutex_lock(&mdsc->mutex); dout("wait_unsafe_requests want %lld\n", want_tid); - while (1) { - got = radix_tree_gang_lookup(&mdsc->request_tree, (void **)&req, - next_tid, 1); - if (!got) - break; - if (req->r_tid > want_tid) + req = __get_oldest_req(mdsc); + while (req && req->r_tid <= want_tid) { + if ((req->r_op & CEPH_MDS_OP_WRITE)) { + /* write op */ + ceph_mdsc_get_request(req); + mutex_unlock(&mdsc->mutex); + dout("wait_unsafe_requests wait on %llu (want %llu)\n", + req->r_tid, want_tid); + wait_for_completion(&req->r_safe_completion); + mutex_lock(&mdsc->mutex); + n = rb_next(&req->r_node); + ceph_mdsc_put_request(req); + } else { + n = rb_next(&req->r_node); + } + if (!n) break; - - next_tid = req->r_tid + 1; - if ((req->r_op & CEPH_MDS_OP_WRITE) == 0) - continue; /* not a write op */ - - ceph_mdsc_get_request(req); - mutex_unlock(&mdsc->mutex); - dout("wait_unsafe_requests wait on %llu (want %llu)\n", - req->r_tid, want_tid); - wait_for_completion(&req->r_safe_completion); - mutex_lock(&mdsc->mutex); - ceph_mdsc_put_request(req); + req = rb_entry(n, struct ceph_mds_request, r_node); } mutex_unlock(&mdsc->mutex); dout("wait_unsafe_requests done\n"); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index ee71495e27c4..98f09cd06006 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #include "types.h" @@ -150,6 +151,7 @@ typedef void (*ceph_mds_request_callback_t) (struct ceph_mds_client *mdsc, */ struct ceph_mds_request { u64 r_tid; /* transaction id */ + struct rb_node r_node; int r_op; /* mds op code */ int r_mds; @@ -249,7 +251,7 @@ struct ceph_mds_client { spinlock_t snap_empty_lock; /* protect snap_empty */ u64 last_tid; /* most recent mds request */ - struct radix_tree_root request_tree; /* pending mds requests */ + struct rb_root request_tree; /* pending mds 
requests */ struct delayed_work delayed_work; /* delayed work */ unsigned long last_renew_caps; /* last time we renewed our caps */ struct list_head cap_delay_list; /* caps with delayed release */ -- cgit v1.2.3 From a105f00cf17d711e876b3dc67e15f9a89b7de5a3 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 15 Feb 2010 14:37:55 -0800 Subject: ceph: use rbtree for snap_realms Switch from radix tree to rbtree for snap realms. This is much more appropriate given that realm keys are few and far between. Signed-off-by: Sage Weil --- fs/ceph/mds_client.c | 16 +++++----------- fs/ceph/mds_client.h | 3 +-- fs/ceph/snap.c | 51 ++++++++++++++++++++++++++++++++++++++++----------- fs/ceph/super.h | 2 ++ 4 files changed, 48 insertions(+), 24 deletions(-) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 81840d6b68a4..02834cecc3a0 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2097,9 +2097,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds) { struct ceph_mds_session *session = NULL; struct ceph_msg *reply; + struct rb_node *p; int err; - int got; - u64 next_snap_ino = 0; struct ceph_pagelist *pagelist; pr_info("reconnect to recovering mds%d\n", mds); @@ -2155,14 +2154,10 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds) * parent for all of our realms. If the mds has any newer info, * it will tell us. */ - next_snap_ino = 0; - while (1) { - struct ceph_snap_realm *realm; + for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) { + struct ceph_snap_realm *realm = + rb_entry(p, struct ceph_snap_realm, node); struct ceph_mds_snaprealm_reconnect sr_rec; - got = radix_tree_gang_lookup(&mdsc->snap_realms, - (void **)&realm, next_snap_ino, 1); - if (!got) - break; dout(" adding snap realm %llx seq %lld parent %llx\n", realm->ino, realm->seq, realm->parent_ino); @@ -2172,7 +2167,6 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds) err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec)); if (err) goto fail; - next_snap_ino = realm->ino + 1; } send: @@ -2603,7 +2597,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client) mdsc->max_sessions = 0; mdsc->stopping = 0; init_rwsem(&mdsc->snap_rwsem); - INIT_RADIX_TREE(&mdsc->snap_realms, GFP_NOFS); + mdsc->snap_realms = RB_ROOT; INIT_LIST_HEAD(&mdsc->snap_empty); spin_lock_init(&mdsc->snap_empty_lock); mdsc->last_tid = 0; diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 98f09cd06006..9d6b90173879 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include @@ -246,7 +245,7 @@ struct ceph_mds_client { * should be destroyed. 
*/ struct rw_semaphore snap_rwsem; - struct radix_tree_root snap_realms; + struct rb_root snap_realms; struct list_head snap_empty; spinlock_t snap_empty_lock; /* protect snap_empty */ diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index dcf18d92130a..49d0c4c59d81 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -1,6 +1,5 @@ #include "ceph_debug.h" -#include #include #include "super.h" @@ -77,6 +76,28 @@ void ceph_get_snap_realm(struct ceph_mds_client *mdsc, atomic_inc(&realm->nref); } +static void __insert_snap_realm(struct rb_root *root, + struct ceph_snap_realm *new) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct ceph_snap_realm *r = NULL; + + while (*p) { + parent = *p; + r = rb_entry(parent, struct ceph_snap_realm, node); + if (new->ino < r->ino) + p = &(*p)->rb_left; + else if (new->ino > r->ino) + p = &(*p)->rb_right; + else + BUG(); + } + + rb_link_node(&new->node, parent, p); + rb_insert_color(&new->node, root); +} + /* * create and get the realm rooted at @ino and bump its ref count. * @@ -92,8 +113,6 @@ static struct ceph_snap_realm *ceph_create_snap_realm( if (!realm) return ERR_PTR(-ENOMEM); - radix_tree_insert(&mdsc->snap_realms, ino, realm); - atomic_set(&realm->nref, 0); /* tree does not take a ref */ realm->ino = ino; INIT_LIST_HEAD(&realm->children); @@ -101,24 +120,34 @@ static struct ceph_snap_realm *ceph_create_snap_realm( INIT_LIST_HEAD(&realm->empty_item); INIT_LIST_HEAD(&realm->inodes_with_caps); spin_lock_init(&realm->inodes_with_caps_lock); + __insert_snap_realm(&mdsc->snap_realms, realm); dout("create_snap_realm %llx %p\n", realm->ino, realm); return realm; } /* - * find and get (if found) the realm rooted at @ino and bump its ref count. + * lookup the realm rooted at @ino. * * caller must hold snap_rwsem for write. */ struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc, u64 ino) { - struct ceph_snap_realm *realm; - - realm = radix_tree_lookup(&mdsc->snap_realms, ino); - if (realm) - dout("lookup_snap_realm %llx %p\n", realm->ino, realm); - return realm; + struct rb_node *n = mdsc->snap_realms.rb_node; + struct ceph_snap_realm *r; + + while (n) { + r = rb_entry(n, struct ceph_snap_realm, node); + if (ino < r->ino) + n = n->rb_left; + else if (ino > r->ino) + n = n->rb_right; + else { + dout("lookup_snap_realm %llx %p\n", r->ino, r); + return r; + } + } + return NULL; } static void __put_snap_realm(struct ceph_mds_client *mdsc, @@ -132,7 +161,7 @@ static void __destroy_snap_realm(struct ceph_mds_client *mdsc, { dout("__destroy_snap_realm %p %llx\n", realm, realm->ino); - radix_tree_delete(&mdsc->snap_realms, realm->ino); + rb_erase(&realm->node, &mdsc->snap_realms); if (realm->parent) { list_del_init(&realm->child_item); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index b2adfccbab98..1f3928785e12 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -656,6 +656,8 @@ static inline void ceph_put_snap_context(struct ceph_snap_context *sc) struct ceph_snap_realm { u64 ino; atomic_t nref; + struct rb_node node; + u64 created, seq; u64 parent_ino; u64 parent_since; /* snapid when our current parent became so */ -- cgit v1.2.3 From 85ff03f6bfef7d5b59ab3aefd4773f497ffad8a4 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 15 Feb 2010 14:47:28 -0800 Subject: ceph: use rbtree for mon statfs requests An rbtree is lighter weight, particularly given we will generally have very few in-flight statfs requests. 
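Note that the statfs request itself lives on the caller's stack and is only linked into the tree while the caller waits; schematically (a condensed sketch of the code below, with locking, msgpool reservation, retry bookkeeping, and the completion setup from unchanged context omitted):

    struct ceph_mon_statfs_request req;    /* on the caller's stack */

    req.buf = buf;                         /* reply handler fills *req.buf */
    req.tid = ++monc->last_tid;
    __insert_statfs(monc, &req);           /* reply handler finds it by tid */

    send_statfs(monc, &req);
    err = wait_for_completion_interruptible(&req.completion);

    rb_erase(&req.node, &monc->statfs_request_tree);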
Signed-off-by: Sage Weil --- fs/ceph/debugfs.c | 14 ++++------- fs/ceph/mon_client.c | 67 ++++++++++++++++++++++++++++++++++++---------------- fs/ceph/mon_client.h | 5 ++-- 3 files changed, 54 insertions(+), 32 deletions(-) diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index cd5dd805e4be..b58bd9188692 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -112,9 +112,8 @@ static int monc_show(struct seq_file *s, void *p) { struct ceph_client *client = s->private; struct ceph_mon_statfs_request *req; - u64 nexttid = 0; - int got; struct ceph_mon_client *monc = &client->monc; + struct rb_node *rp; mutex_lock(&monc->mutex); @@ -125,17 +124,12 @@ static int monc_show(struct seq_file *s, void *p) if (monc->want_next_osdmap) seq_printf(s, "want next osdmap\n"); - while (nexttid < monc->last_tid) { - got = radix_tree_gang_lookup(&monc->statfs_request_tree, - (void **)&req, nexttid, 1); - if (got == 0) - break; - nexttid = req->tid + 1; - + for (rp = rb_first(&monc->statfs_request_tree); rp; rp = rb_next(rp)) { + req = rb_entry(rp, struct ceph_mon_statfs_request, node); seq_printf(s, "%lld statfs\n", req->tid); } - mutex_unlock(&monc->mutex); + mutex_unlock(&monc->mutex); return 0; } diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index fec41a0eff86..542276e60798 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -343,6 +343,46 @@ out: /* * statfs */ +static struct ceph_mon_statfs_request *__lookup_statfs( + struct ceph_mon_client *monc, u64 tid) +{ + struct ceph_mon_statfs_request *req; + struct rb_node *n = monc->statfs_request_tree.rb_node; + + while (n) { + req = rb_entry(n, struct ceph_mon_statfs_request, node); + if (tid < req->tid) + n = n->rb_left; + else if (tid > req->tid) + n = n->rb_right; + else + return req; + } + return NULL; +} + +static void __insert_statfs(struct ceph_mon_client *monc, + struct ceph_mon_statfs_request *new) +{ + struct rb_node **p = &monc->statfs_request_tree.rb_node; + struct rb_node *parent = NULL; + struct ceph_mon_statfs_request *req = NULL; + + while (*p) { + parent = *p; + req = rb_entry(parent, struct ceph_mon_statfs_request, node); + if (new->tid < req->tid) + p = &(*p)->rb_left; + else if (new->tid > req->tid) + p = &(*p)->rb_right; + else + BUG(); + } + + rb_link_node(&new->node, parent, p); + rb_insert_color(&new->node, &monc->statfs_request_tree); +} + static void handle_statfs_reply(struct ceph_mon_client *monc, struct ceph_msg *msg) { @@ -356,7 +396,7 @@ static void handle_statfs_reply(struct ceph_mon_client *monc, dout("handle_statfs_reply %p tid %llu\n", msg, tid); mutex_lock(&monc->mutex); - req = radix_tree_lookup(&monc->statfs_request_tree, tid); + req = __lookup_statfs(monc, tid); if (req) { *req->buf = reply->st; req->result = 0; @@ -416,11 +456,7 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf) req.tid = ++monc->last_tid; req.last_attempt = jiffies; req.delay = BASE_DELAY_INTERVAL; - if (radix_tree_insert(&monc->statfs_request_tree, req.tid, &req) < 0) { - mutex_unlock(&monc->mutex); - pr_err("ENOMEM in do_statfs\n"); - return -ENOMEM; - } + __insert_statfs(monc, &req); monc->num_statfs_requests++; mutex_unlock(&monc->mutex); @@ -430,7 +466,7 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf) err = wait_for_completion_interruptible(&req.completion); mutex_lock(&monc->mutex); - radix_tree_delete(&monc->statfs_request_tree, req.tid); + rb_erase(&req.node, &monc->statfs_request_tree); monc->num_statfs_requests--; 
ceph_msgpool_resv(&monc->msgpool_statfs_reply, -1); mutex_unlock(&monc->mutex); @@ -445,20 +481,11 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf) */ static void __resend_statfs(struct ceph_mon_client *monc) { - u64 next_tid = 0; - int got; - int did = 0; struct ceph_mon_statfs_request *req; + struct rb_node *p; - while (1) { - got = radix_tree_gang_lookup(&monc->statfs_request_tree, - (void **)&req, - next_tid, 1); - if (got == 0) - break; - did++; - next_tid = req->tid + 1; - + for (p = rb_first(&monc->statfs_request_tree); p; p = rb_next(p)) { + req = rb_entry(p, struct ceph_mon_statfs_request, node); send_statfs(monc, req); } } @@ -578,7 +605,7 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) monc->sub_sent = 0; INIT_DELAYED_WORK(&monc->delayed_work, delayed_work); - INIT_RADIX_TREE(&monc->statfs_request_tree, GFP_NOFS); + monc->statfs_request_tree = RB_ROOT; monc->num_statfs_requests = 0; monc->last_tid = 0; diff --git a/fs/ceph/mon_client.h b/fs/ceph/mon_client.h index 5ca8e48d4379..b958ad5afa06 100644 --- a/fs/ceph/mon_client.h +++ b/fs/ceph/mon_client.h @@ -2,7 +2,7 @@ #define _FS_CEPH_MON_CLIENT_H #include -#include +#include #include "messenger.h" #include "msgpool.h" @@ -45,6 +45,7 @@ struct ceph_mon_request { */ struct ceph_mon_statfs_request { u64 tid; + struct rb_node node; int result; struct ceph_statfs *buf; struct completion completion; @@ -75,7 +76,7 @@ struct ceph_mon_client { struct ceph_msgpool msgpool_auth_reply; /* pending statfs requests */ - struct radix_tree_root statfs_request_tree; + struct rb_root statfs_request_tree; int num_statfs_requests; u64 last_tid; -- cgit v1.2.3 From 5ce6e9dbe6805ab8ee67e21936d17f431adc63c6 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 15 Feb 2010 16:22:28 -0800 Subject: ceph: fix authentication races, auth_none oops Call __validate_auth() under monc->mutex, and use helper for initial hello so that the pending_auth flag is set. This fixes possible races in which we have an authentication request (hello or otherwise) pending and send another one. In particular, with auth_none, we _never_ want to call ceph_build_auth() from __validate_auth(), since the ->build_request() method is NULL. Signed-off-by: Sage Weil --- fs/ceph/mon_client.c | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index 542276e60798..40d7d90bbed1 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -95,6 +95,18 @@ int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr) return 0; } +/* + * Send an auth request. + */ +static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len) +{ + monc->pending_auth = 1; + monc->m_auth->front.iov_len = len; + monc->m_auth->hdr.front_len = cpu_to_le32(len); + ceph_msg_get(monc->m_auth); /* keep our ref */ + ceph_con_send(monc->con, monc->m_auth); +} + /* * Close monitor session, if any. 
*/ @@ -137,10 +149,7 @@ static int __open_session(struct ceph_mon_client *monc) ret = ceph_auth_build_hello(monc->auth, monc->m_auth->front.iov_base, monc->m_auth->front_max); - monc->m_auth->front.iov_len = ret; - monc->m_auth->hdr.front_len = cpu_to_le32(ret); - ceph_msg_get(monc->m_auth); /* keep our ref */ - ceph_con_send(monc->con, monc->m_auth); + __send_prepared_auth_request(monc, ret); } else { dout("open_session mon%d already open\n", monc->cur_mon); } @@ -507,11 +516,9 @@ static void delayed_work(struct work_struct *work) __open_session(monc); /* continue hunting */ } else { ceph_con_keepalive(monc->con); - mutex_unlock(&monc->mutex); __validate_auth(monc); - mutex_lock(&monc->mutex); if (monc->auth->ops->is_authenticated(monc->auth)) __send_subscribe(monc); } @@ -650,16 +657,6 @@ void ceph_monc_stop(struct ceph_mon_client *monc) kfree(monc->monmap); } -static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len) -{ - monc->pending_auth = 1; - monc->m_auth->front.iov_len = len; - monc->m_auth->hdr.front_len = cpu_to_le32(len); - ceph_msg_get(monc->m_auth); /* keep our ref */ - ceph_con_send(monc->con, monc->m_auth); -} - - static void handle_auth_reply(struct ceph_mon_client *monc, struct ceph_msg *msg) { -- cgit v1.2.3 From 85ccce43a3fc15a40ded6ae1603e3f68a17f4d24 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 17 Feb 2010 10:02:43 -0800 Subject: ceph: clean up readdir caps reservation Use a global counter for the minimum number of allocated caps instead of hard coding a check against readdir_max. This takes into account multiple client instances, and avoids examining the superblock mount options when a cap is dropped. Signed-off-by: Sage Weil --- fs/ceph/caps.c | 23 +++++++++++++++++------ fs/ceph/debugfs.c | 13 +++++++------ fs/ceph/super.c | 5 +++++ fs/ceph/super.h | 5 ++++- 4 files changed, 33 insertions(+), 13 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index ab9b571dda11..f94b56faba3b 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -128,6 +128,7 @@ static int caps_total_count; /* total caps allocated */ static int caps_use_count; /* in use */ static int caps_reserve_count; /* unused, reserved */ static int caps_avail_count; /* unused, unreserved */ +static int caps_min_count; /* keep at least this many (unreserved) */ void __init ceph_caps_init(void) { @@ -149,6 +150,15 @@ void ceph_caps_finalize(void) caps_avail_count = 0; caps_use_count = 0; caps_reserve_count = 0; + caps_min_count = 0; + spin_unlock(&caps_list_lock); +} + +void ceph_adjust_min_caps(int delta) +{ + spin_lock(&caps_list_lock); + caps_min_count += delta; + BUG_ON(caps_min_count < 0); spin_unlock(&caps_list_lock); } @@ -265,12 +275,10 @@ static void put_cap(struct ceph_cap *cap, caps_reserve_count, caps_avail_count); caps_use_count--; /* - * Keep some preallocated caps around, at least enough to do a - * readdir (which needs to preallocate lots of them), to avoid - * lots of free/alloc churn. + * Keep some preallocated caps around (ceph_min_count), to + * avoid lots of free/alloc churn. 
*/ - if (caps_avail_count >= caps_reserve_count + - ceph_client(cap->ci->vfs_inode.i_sb)->mount_args->max_readdir) { + if (caps_avail_count >= caps_reserve_count + caps_min_count) { caps_total_count--; kmem_cache_free(ceph_cap_cachep, cap); } else { @@ -289,7 +297,8 @@ static void put_cap(struct ceph_cap *cap, } void ceph_reservation_status(struct ceph_client *client, - int *total, int *avail, int *used, int *reserved) + int *total, int *avail, int *used, int *reserved, + int *min) { if (total) *total = caps_total_count; @@ -299,6 +308,8 @@ void ceph_reservation_status(struct ceph_client *client, *used = caps_use_count; if (reserved) *reserved = caps_reserve_count; + if (min) + *min = caps_min_count; } /* diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index b58bd9188692..1a47b5c25b5f 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -255,14 +255,15 @@ static int osdc_show(struct seq_file *s, void *pp) static int caps_show(struct seq_file *s, void *p) { struct ceph_client *client = p; - int total, avail, used, reserved; + int total, avail, used, reserved, min; - ceph_reservation_status(client, &total, &avail, &used, &reserved); + ceph_reservation_status(client, &total, &avail, &used, &reserved, &min); seq_printf(s, "total\t\t%d\n" - "avail\t\t%d\n" - "used\t\t%d\n" - "reserved\t%d\n", - total, avail, used, reserved); + "avail\t\t%d\n" + "used\t\t%d\n" + "reserved\t%d\n" + "min\t%d\n", + total, avail, used, reserved, min); return 0; } diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 39aaf29a04a0..74953be75f8f 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -578,6 +578,9 @@ static struct ceph_client *ceph_create_client(struct ceph_mount_args *args) if (!client->wb_pagevec_pool) goto fail_trunc_wq; + /* caps */ + client->min_caps = args->max_readdir; + ceph_adjust_min_caps(client->min_caps); /* subsystems */ err = ceph_monc_init(&client->monc, client); @@ -619,6 +622,8 @@ static void ceph_destroy_client(struct ceph_client *client) ceph_monc_stop(&client->monc); ceph_osdc_stop(&client->osdc); + ceph_adjust_min_caps(-client->min_caps); + ceph_debugfs_client_cleanup(client); destroy_workqueue(client->wb_wq); destroy_workqueue(client->pg_inv_wq); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 1f3928785e12..3b5faf9980f8 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -129,6 +129,8 @@ struct ceph_client { int auth_err; + int min_caps; /* min caps i added */ + struct ceph_messenger *msgr; /* messenger instance */ struct ceph_mon_client monc; struct ceph_mds_client mdsc; @@ -557,11 +559,12 @@ extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci); extern void ceph_caps_init(void); extern void ceph_caps_finalize(void); +extern void ceph_adjust_min_caps(int delta); extern int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need); extern int ceph_unreserve_caps(struct ceph_cap_reservation *ctx); extern void ceph_reservation_status(struct ceph_client *client, int *total, int *avail, int *used, - int *reserved); + int *reserved, int *min); static inline struct ceph_client *ceph_inode_to_client(struct inode *inode) { -- cgit v1.2.3 From 7c1332b8cb5b27656cf6ab1f5fe808a8eb8bb2c0 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 16 Feb 2010 11:39:45 -0800 Subject: ceph: fix iterate_caps removal race We need to be able to iterate over all caps on a session with a possibly slow callback on each cap. To allow this, we used to prevent cap reordering while we were iterating. 
However, we were not safe from races with removal: removing the 'next' cap would make the next pointer from list_for_each_entry_safe be invalid, and cause a lock up or similar badness. Instead, we keep an iterator pointer in the session pointing to the current cap. As before, we avoid reordering. For removal, if the cap isn't the current cap we are iterating over, we are fine. If it is, we clear cap->ci (to mark the cap as pending removal) but leave it in the session list. In iterate_caps, we can safely finish removal and get the next cap pointer. While we're at it, clean up put_cap to not take a cap reservation context, as it was never used. Signed-off-by: Sage Weil --- fs/ceph/caps.c | 47 ++++++++++++++++++++++++----------------------- fs/ceph/mds_client.c | 51 ++++++++++++++++++++++++++++++++++++++++++--------- fs/ceph/mds_client.h | 2 +- fs/ceph/super.h | 6 +++--- 4 files changed, 70 insertions(+), 36 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index f94b56faba3b..4958a2ef3e04 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -266,12 +266,11 @@ static struct ceph_cap *get_cap(struct ceph_cap_reservation *ctx) return cap; } -static void put_cap(struct ceph_cap *cap, - struct ceph_cap_reservation *ctx) +void ceph_put_cap(struct ceph_cap *cap) { spin_lock(&caps_list_lock); - dout("put_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n", - ctx, ctx ? ctx->count : 0, caps_total_count, caps_use_count, + dout("put_cap %p %d = %d used + %d resv + %d avail\n", + cap, caps_total_count, caps_use_count, caps_reserve_count, caps_avail_count); caps_use_count--; /* @@ -282,12 +281,7 @@ static void put_cap(struct ceph_cap *cap, caps_total_count--; kmem_cache_free(ceph_cap_cachep, cap); } else { - if (ctx) { - ctx->count++; - caps_reserve_count++; - } else { - caps_avail_count++; - } + caps_avail_count++; list_add(&cap->caps_item, &caps_list); } @@ -709,7 +703,7 @@ static void __touch_cap(struct ceph_cap *cap) struct ceph_mds_session *s = cap->session; spin_lock(&s->s_cap_lock); - if (!s->s_iterating_caps) { + if (s->s_cap_iterator == NULL) { dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap, s->s_mds); list_move_tail(&cap->session_caps, &s->s_caps); @@ -865,8 +859,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci) * caller should hold i_lock, and session s_mutex. * returns true if this is the last cap. if so, caller should iput. 
*/ -void __ceph_remove_cap(struct ceph_cap *cap, - struct ceph_cap_reservation *ctx) +void __ceph_remove_cap(struct ceph_cap *cap) { struct ceph_mds_session *session = cap->session; struct ceph_inode_info *ci = cap->ci; @@ -874,19 +867,27 @@ void __ceph_remove_cap(struct ceph_cap *cap, dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); - /* remove from session list */ - spin_lock(&session->s_cap_lock); - list_del_init(&cap->session_caps); - session->s_nr_caps--; - spin_unlock(&session->s_cap_lock); - /* remove from inode list */ rb_erase(&cap->ci_node, &ci->i_caps); - cap->session = NULL; + cap->ci = NULL; if (ci->i_auth_cap == cap) ci->i_auth_cap = NULL; - put_cap(cap, ctx); + /* remove from session list */ + spin_lock(&session->s_cap_lock); + if (session->s_cap_iterator == cap) { + /* not yet, we are iterating over this very cap */ + dout("__ceph_remove_cap delaying %p removal from session %p\n", + cap, cap->session); + } else { + list_del_init(&cap->session_caps); + session->s_nr_caps--; + cap->session = NULL; + } + spin_unlock(&session->s_cap_lock); + + if (cap->session == NULL) + ceph_put_cap(cap); if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) { struct ceph_snap_realm *realm = ci->i_snap_realm; @@ -1022,7 +1023,7 @@ void ceph_queue_caps_release(struct inode *inode) } spin_unlock(&session->s_cap_lock); p = rb_next(p); - __ceph_remove_cap(cap, NULL); + __ceph_remove_cap(cap); } spin_unlock(&inode->i_lock); @@ -2521,7 +2522,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex, ci->i_cap_exporting_mseq = mseq; ci->i_cap_exporting_issued = cap->issued; } - __ceph_remove_cap(cap, NULL); + __ceph_remove_cap(cap); } else { WARN_ON(!cap); } diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 02834cecc3a0..124c0c17a14a 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -344,7 +344,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, INIT_LIST_HEAD(&s->s_waiting); INIT_LIST_HEAD(&s->s_unsafe); s->s_num_cap_releases = 0; - s->s_iterating_caps = false; + s->s_cap_iterator = NULL; INIT_LIST_HEAD(&s->s_cap_releases); INIT_LIST_HEAD(&s->s_cap_releases_done); INIT_LIST_HEAD(&s->s_cap_flushing); @@ -729,28 +729,61 @@ static int iterate_session_caps(struct ceph_mds_session *session, int (*cb)(struct inode *, struct ceph_cap *, void *), void *arg) { - struct ceph_cap *cap, *ncap; - struct inode *inode; + struct list_head *p; + struct ceph_cap *cap; + struct inode *inode, *last_inode = NULL; + struct ceph_cap *old_cap = NULL; int ret; dout("iterate_session_caps %p mds%d\n", session, session->s_mds); spin_lock(&session->s_cap_lock); - session->s_iterating_caps = true; - list_for_each_entry_safe(cap, ncap, &session->s_caps, session_caps) { + p = session->s_caps.next; + while (p != &session->s_caps) { + cap = list_entry(p, struct ceph_cap, session_caps); inode = igrab(&cap->ci->vfs_inode); - if (!inode) + if (!inode) { + p = p->next; continue; + } + session->s_cap_iterator = cap; spin_unlock(&session->s_cap_lock); + + if (last_inode) { + iput(last_inode); + last_inode = NULL; + } + if (old_cap) { + ceph_put_cap(old_cap); + old_cap = NULL; + } + ret = cb(inode, cap, arg); - iput(inode); + last_inode = inode; + spin_lock(&session->s_cap_lock); + p = p->next; + if (cap->ci == NULL) { + dout("iterate_session_caps finishing cap %p removal\n", + cap); + BUG_ON(cap->session != session); + list_del_init(&cap->session_caps); + session->s_nr_caps--; + cap->session = NULL; + old_cap = cap; /* put_cap it w/o locks held */ + } 
if (ret < 0) goto out; } ret = 0; out: - session->s_iterating_caps = false; + session->s_cap_iterator = NULL; spin_unlock(&session->s_cap_lock); + + if (last_inode) + iput(last_inode); + if (old_cap) + ceph_put_cap(old_cap); + return ret; } @@ -942,7 +975,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) session->s_trim_caps--; if (oissued) { /* we aren't the only cap.. just remove us */ - __ceph_remove_cap(cap, NULL); + __ceph_remove_cap(cap); } else { /* try to drop referring dentries */ spin_unlock(&inode->i_lock); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 9d6b90173879..961cc6f65878 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -114,7 +114,7 @@ struct ceph_mds_session { int s_num_cap_releases; struct list_head s_cap_releases; /* waiting cap_release messages */ struct list_head s_cap_releases_done; /* ready to send */ - bool s_iterating_caps; + struct ceph_cap *s_cap_iterator; /* protected by mutex */ struct list_head s_cap_flushing; /* inodes w/ flushing caps */ diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 3b5faf9980f8..384f0e2e7c68 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -795,15 +795,15 @@ extern int ceph_add_cap(struct inode *inode, int fmode, unsigned issued, unsigned wanted, unsigned cap, unsigned seq, u64 realmino, int flags, struct ceph_cap_reservation *caps_reservation); -extern void __ceph_remove_cap(struct ceph_cap *cap, - struct ceph_cap_reservation *ctx); +extern void __ceph_remove_cap(struct ceph_cap *cap); static inline void ceph_remove_cap(struct ceph_cap *cap) { struct inode *inode = &cap->ci->vfs_inode; spin_lock(&inode->i_lock); - __ceph_remove_cap(cap, NULL); + __ceph_remove_cap(cap); spin_unlock(&inode->i_lock); } +extern void ceph_put_cap(struct ceph_cap *cap); extern void ceph_queue_caps_release(struct inode *inode); extern int ceph_write_inode(struct inode *inode, int unused); -- cgit v1.2.3 From 9794b146fa7b93f8ab74fb62d67fdefad760769f Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 16 Feb 2010 15:53:32 -0800 Subject: ceph: fix memory leak when destroying osdmap with pg_temp mappings Also move _lookup_pg_mapping into a helper. 
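To make the leak fix concrete, here is a minimal stand-alone sketch of the pattern (illustrative only; the real code operates on struct ceph_pg_mapping inside ceph_osdmap_destroy): when rbtree nodes are embedded in kmalloc'd structs, erasing the node is not enough; the containing struct must also be freed.

    #include <linux/rbtree.h>
    #include <linux/slab.h>

    struct example_mapping {                /* stand-in for ceph_pg_mapping */
            struct rb_node node;
            /* ... payload ... */
    };

    static void drain_tree(struct rb_root *root)
    {
            while (!RB_EMPTY_ROOT(root)) {
                    struct example_mapping *m =
                            rb_entry(rb_first(root),
                                     struct example_mapping, node);
                    rb_erase(&m->node, root);
                    kfree(m);               /* the kfree the old loop was missing */
            }
    }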
Signed-off-by: Sage Weil --- fs/ceph/osdmap.c | 49 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index a6afe3836f7e..443fdcdb19c4 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -321,8 +321,13 @@ void ceph_osdmap_destroy(struct ceph_osdmap *map) dout("osdmap_destroy %p\n", map); if (map->crush) crush_destroy(map->crush); - while (!RB_EMPTY_ROOT(&map->pg_temp)) - rb_erase(rb_first(&map->pg_temp), &map->pg_temp); + while (!RB_EMPTY_ROOT(&map->pg_temp)) { + struct ceph_pg_mapping *pg = + rb_entry(rb_first(&map->pg_temp), + struct ceph_pg_mapping, node); + rb_erase(&pg->node, &map->pg_temp); + kfree(pg); + } kfree(map->osd_state); kfree(map->osd_weight); kfree(map->pg_pool); @@ -367,7 +372,8 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) } /* - * Insert a new pg_temp mapping + * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid + * to a set of osds) */ static int pgid_cmp(struct ceph_pg l, struct ceph_pg r) { @@ -406,6 +412,26 @@ static int __insert_pg_mapping(struct ceph_pg_mapping *new, return 0; } +static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root, + struct ceph_pg pgid) +{ + struct rb_node *n = root->rb_node; + struct ceph_pg_mapping *pg; + int c; + + while (n) { + pg = rb_entry(n, struct ceph_pg_mapping, node); + c = pgid_cmp(pgid, pg->pgid); + if (c < 0) + n = n->rb_left; + else if (c > 0) + n = n->rb_right; + else + return pg; + } + return NULL; +} + /* * decode a full map. */ @@ -870,26 +896,17 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol, static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, int *osds, int *num) { - struct rb_node *n = osdmap->pg_temp.rb_node; struct ceph_pg_mapping *pg; struct ceph_pg_pool_info *pool; int ruleno; unsigned poolid, ps, pps; int preferred; - int c; /* pg_temp? */ - while (n) { - pg = rb_entry(n, struct ceph_pg_mapping, node); - c = pgid_cmp(pgid, pg->pgid); - if (c < 0) - n = n->rb_left; - else if (c > 0) - n = n->rb_right; - else { - *num = pg->len; - return pg->osds; - } + pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); + if (pg) { + *num = pg->len; + return pg->osds; } /* crush */ -- cgit v1.2.3 From 4fc51be8fa7043ff9a1e34fef0e99214373332ac Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 16 Feb 2010 15:55:03 -0800 Subject: ceph: use rbtree for pg pools; decode new osdmap format Since we can now create and destroy pg pools, the pool ids will be sparse, and an array no longer makes sense for looking up by pool id. Use an rbtree instead. The OSDMap encoding also no longer has a max pool count (previously used to allocate the array). There is a new pool_max, that is the largest pool id we've ever used, although we don't actually need it in the client. Signed-off-by: Sage Weil --- fs/ceph/debugfs.c | 7 +-- fs/ceph/osdmap.c | 136 +++++++++++++++++++++++++++++++++++++----------------- fs/ceph/osdmap.h | 7 +-- fs/ceph/rados.h | 4 +- 4 files changed, 104 insertions(+), 50 deletions(-) diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index 1a47b5c25b5f..e159f1415110 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -78,6 +78,7 @@ static int osdmap_show(struct seq_file *s, void *p) { int i; struct ceph_client *client = s->private; + struct rb_node *n; if (client->osdc.osdmap == NULL) return 0; @@ -87,11 +88,11 @@ static int osdmap_show(struct seq_file *s, void *p) " NEARFULL" : "", (client->osdc.osdmap->flags & CEPH_OSDMAP_FULL) ? 
" FULL" : ""); - for (i = 0; i < client->osdc.osdmap->num_pools; i++) { + for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) { struct ceph_pg_pool_info *pool = - &client->osdc.osdmap->pg_pool[i]; + rb_entry(n, struct ceph_pg_pool_info, node); seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n", - i, pool->v.pg_num, pool->pg_num_mask, + pool->id, pool->v.pg_num, pool->pg_num_mask, pool->v.lpg_num, pool->lpg_num_mask); } for (i = 0; i < client->osdc.osdmap->max_osd; i++) { diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 443fdcdb19c4..34b5696c84fd 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -328,9 +328,15 @@ void ceph_osdmap_destroy(struct ceph_osdmap *map) rb_erase(&pg->node, &map->pg_temp); kfree(pg); } + while (!RB_EMPTY_ROOT(&map->pg_pools)) { + struct ceph_pg_pool_info *pi = + rb_entry(rb_first(&map->pg_pools), + struct ceph_pg_pool_info, node); + rb_erase(&pi->node, &map->pg_pools); + kfree(pi); + } kfree(map->osd_state); kfree(map->osd_weight); - kfree(map->pg_pool); kfree(map->osd_addr); kfree(map); } @@ -432,6 +438,48 @@ static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root, return NULL; } +/* + * rbtree of pg pool info + */ +static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct ceph_pg_pool_info *pi = NULL; + + while (*p) { + parent = *p; + pi = rb_entry(parent, struct ceph_pg_pool_info, node); + if (new->id < pi->id) + p = &(*p)->rb_left; + else if (new->id > pi->id) + p = &(*p)->rb_right; + else + return -EEXIST; + } + + rb_link_node(&new->node, parent, p); + rb_insert_color(&new->node, root); + return 0; +} + +static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id) +{ + struct ceph_pg_pool_info *pi; + struct rb_node *n = root->rb_node; + + while (n) { + pi = rb_entry(n, struct ceph_pg_pool_info, node); + if (id < pi->id) + n = n->rb_left; + else if (id > pi->id) + n = n->rb_right; + else + return pi; + } + return NULL; +} + /* * decode a full map. 
*/ @@ -443,6 +491,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) u8 ev; int err = -EINVAL; void *start = *p; + struct ceph_pg_pool_info *pi; dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p)); @@ -464,32 +513,27 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) ceph_decode_copy(p, &map->created, sizeof(map->created)); ceph_decode_copy(p, &map->modified, sizeof(map->modified)); - map->num_pools = ceph_decode_32(p); - map->pg_pool = kcalloc(map->num_pools, sizeof(*map->pg_pool), - GFP_NOFS); - if (!map->pg_pool) { - err = -ENOMEM; - goto bad; - } ceph_decode_32_safe(p, end, max, bad); while (max--) { - ceph_decode_need(p, end, 4+1+sizeof(map->pg_pool->v), bad); - i = ceph_decode_32(p); - if (i >= map->num_pools) + ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad); + pi = kmalloc(sizeof(*pi), GFP_NOFS); + if (!pi) goto bad; + pi->id = ceph_decode_32(p); ev = ceph_decode_8(p); /* encoding version */ if (ev > CEPH_PG_POOL_VERSION) { pr_warning("got unknown v %d > %d of ceph_pg_pool\n", ev, CEPH_PG_POOL_VERSION); goto bad; } - ceph_decode_copy(p, &map->pg_pool[i].v, - sizeof(map->pg_pool->v)); - calc_pg_masks(&map->pg_pool[i]); - p += le32_to_cpu(map->pg_pool[i].v.num_snaps) * sizeof(u64); - p += le32_to_cpu(map->pg_pool[i].v.num_removed_snap_intervals) + ceph_decode_copy(p, &pi->v, sizeof(pi->v)); + __insert_pg_pool(&map->pg_pools, pi); + calc_pg_masks(pi); + p += le32_to_cpu(pi->v.num_snaps) * sizeof(u64); + p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2; } + ceph_decode_32_safe(p, end, map->pool_max, bad); ceph_decode_32_safe(p, end, map->flags, bad); @@ -581,7 +625,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, u32 epoch = 0; struct ceph_timespec modified; u32 len, pool; - __s32 new_flags, max; + __s32 new_pool_max, new_flags, max; void *start = *p; int err = -EINVAL; u16 version; @@ -600,6 +644,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, epoch = ceph_decode_32(p); BUG_ON(epoch != map->epoch+1); ceph_decode_copy(p, &modified, sizeof(modified)); + new_pool_max = ceph_decode_32(p); new_flags = ceph_decode_32(p); /* full map? */ @@ -623,6 +668,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, /* new flags? 
*/ if (new_flags >= 0) map->flags = new_flags; + if (new_pool_max >= 0) + map->pool_max = new_pool_max; ceph_decode_need(p, end, 5*sizeof(u32), bad); @@ -647,37 +694,42 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, ceph_decode_32_safe(p, end, len, bad); while (len--) { __u8 ev; + struct ceph_pg_pool_info *pi; ceph_decode_32_safe(p, end, pool, bad); - if (pool >= map->num_pools) { - void *pg_pool = kcalloc(pool + 1, - sizeof(*map->pg_pool), - GFP_NOFS); - if (!pg_pool) { - err = -ENOMEM; - goto bad; - } - memcpy(pg_pool, map->pg_pool, - map->num_pools * sizeof(*map->pg_pool)); - kfree(map->pg_pool); - map->pg_pool = pg_pool; - map->num_pools = pool+1; - } - ceph_decode_need(p, end, 1 + sizeof(map->pg_pool->v), bad); + ceph_decode_need(p, end, 1 + sizeof(pi->v), bad); ev = ceph_decode_8(p); /* encoding version */ if (ev > CEPH_PG_POOL_VERSION) { pr_warning("got unknown v %d > %d of ceph_pg_pool\n", ev, CEPH_PG_POOL_VERSION); goto bad; } - ceph_decode_copy(p, &map->pg_pool[pool].v, - sizeof(map->pg_pool->v)); - calc_pg_masks(&map->pg_pool[pool]); + pi = __lookup_pg_pool(&map->pg_pools, pool); + if (!pi) { + pi = kmalloc(sizeof(*pi), GFP_NOFS); + if (!pi) { + err = -ENOMEM; + goto bad; + } + pi->id = pool; + __insert_pg_pool(&map->pg_pools, pi); + } + ceph_decode_copy(p, &pi->v, sizeof(pi->v)); + calc_pg_masks(pi); } - /* old_pool (ignore) */ + /* old_pool */ ceph_decode_32_safe(p, end, len, bad); - *p += len * sizeof(u32); + while (len--) { + struct ceph_pg_pool_info *pi; + + ceph_decode_32_safe(p, end, pool, bad); + pi = __lookup_pg_pool(&map->pg_pools, pool); + if (pi) { + rb_erase(&pi->node, &map->pg_pools); + kfree(pi); + } + } /* new_up */ err = -EINVAL; @@ -861,10 +913,10 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol, unsigned ps; BUG_ON(!osdmap); - if (poolid >= osdmap->num_pools) - return -EIO; - pool = &osdmap->pg_pool[poolid]; + pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); + if (!pool) + return -EIO; ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid)); if (preferred >= 0) { ps += preferred; @@ -919,9 +971,9 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, preferred >= osdmap->crush->max_devices) preferred = -1; - if (poolid >= osdmap->num_pools) + pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); + if (!pool) return NULL; - pool = &osdmap->pg_pool[poolid]; ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset, pool->v.type, pool->v.size); if (ruleno < 0) { diff --git a/fs/ceph/osdmap.h b/fs/ceph/osdmap.h index c4af8418aa00..1fb55afb2642 100644 --- a/fs/ceph/osdmap.h +++ b/fs/ceph/osdmap.h @@ -19,6 +19,8 @@ * the change between two successive epochs, or as a fully encoded map. */ struct ceph_pg_pool_info { + struct rb_node node; + int id; struct ceph_pg_pool v; int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask; }; @@ -44,9 +46,8 @@ struct ceph_osdmap { struct ceph_entity_addr *osd_addr; struct rb_root pg_temp; - - u32 num_pools; - struct ceph_pg_pool_info *pg_pool; + struct rb_root pg_pools; + u32 pool_max; /* the CRUSH map specifies the mapping of placement groups to * the list of osds that store+replicate them. 
*/ diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h index 1f4c78640541..26ac8b89a676 100644 --- a/fs/ceph/rados.h +++ b/fs/ceph/rados.h @@ -11,8 +11,8 @@ /* * osdmap encoding versions */ -#define CEPH_OSDMAP_INC_VERSION 3 -#define CEPH_OSDMAP_VERSION 3 +#define CEPH_OSDMAP_INC_VERSION 4 +#define CEPH_OSDMAP_VERSION 4 /* * fs id -- cgit v1.2.3 From a17d6473cc9eb64a5b41c568310aa73824ebaa64 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 17 Feb 2010 13:56:07 -0800 Subject: ceph: v0.19 release Signed-off-by: Sage Weil --- fs/ceph/ceph_fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h index 004aae59d4ba..0c2241ef3653 100644 --- a/fs/ceph/ceph_fs.h +++ b/fs/ceph/ceph_fs.h @@ -19,7 +19,7 @@ * Ceph release version */ #define CEPH_VERSION_MAJOR 0 -#define CEPH_VERSION_MINOR 18 +#define CEPH_VERSION_MINOR 19 #define CEPH_VERSION_PATCH 0 #define _CEPH_STRINGIFY(x) #x -- cgit v1.2.3 From 2c27c9a57c93a0757b9b4b0e7dc1abeaf1db1ce2 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 17 Feb 2010 15:45:51 -0800 Subject: ceph: fix typo in ceph_queue_writeback debug output Signed-off-by: Sage Weil --- fs/ceph/inode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 58bdff09c2c1..d7d5d4923772 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1247,10 +1247,10 @@ void ceph_queue_writeback(struct inode *inode) { if (queue_work(ceph_inode_to_client(inode)->wb_wq, &ceph_inode(inode)->i_wb_work)) { - dout("ceph_queue_invalidate %p\n", inode); + dout("ceph_queue_writeback %p\n", inode); igrab(inode); } else { - dout("ceph_queue_invalidate %p failed\n", inode); + dout("ceph_queue_writeback %p failed\n", inode); } } -- cgit v1.2.3 From 5ecad6fd7bfd30b3eaea51345f546b81de7a6473 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 17 Feb 2010 10:43:37 -0800 Subject: ceph: fix check for invalidate_mapping_pages success We need to know whether there was any page left behind, and not the return value (the total number of pages invalidated). Look at the mapping to see if we were successful or not. Move it all into a helper to simplify the two callers. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/caps.c | 82 +++++++++++++++++++++++++++++++++++----------------------- 1 file changed, 50 insertions(+), 32 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 4958a2ef3e04..e1e6df0f549e 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1361,6 +1361,41 @@ static int __mark_caps_flushing(struct inode *inode, return flushing; } +/* + * try to invalidate mapping pages without blocking. + */ +static int mapping_is_empty(struct address_space *mapping) +{ + struct page *page = find_get_page(mapping, 0); + + if (!page) + return 1; + + put_page(page); + return 0; +} + +static int try_nonblocking_invalidate(struct inode *inode) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + u32 invalidating_gen = ci->i_rdcache_gen; + + spin_unlock(&inode->i_lock); + invalidate_mapping_pages(&inode->i_data, 0, -1); + spin_lock(&inode->i_lock); + + if (mapping_is_empty(&inode->i_data) && + invalidating_gen == ci->i_rdcache_gen) { + /* success. */ + dout("try_nonblocking_invalidate %p success\n", inode); + ci->i_rdcache_gen = 0; + ci->i_rdcache_revoking = 0; + return 0; + } + dout("try_nonblocking_invalidate %p failed\n", inode); + return -1; +} + /* * Swiss army knife function to examine currently used and wanted * versus held caps. 
Release, flush, ack revoked caps to mds as @@ -1451,27 +1486,19 @@ retry_locked: (file_wanted == 0 || /* no open files */ (revoking & CEPH_CAP_FILE_CACHE)) && /* or revoking cache */ !tried_invalidate) { - u32 invalidating_gen = ci->i_rdcache_gen; - int ret; - dout("check_caps trying to invalidate on %p\n", inode); - spin_unlock(&inode->i_lock); - ret = invalidate_mapping_pages(&inode->i_data, 0, -1); - spin_lock(&inode->i_lock); - if (ret == 0 && invalidating_gen == ci->i_rdcache_gen) { - /* success. */ - ci->i_rdcache_gen = 0; - ci->i_rdcache_revoking = 0; - } else if (revoking & CEPH_CAP_FILE_CACHE) { - dout("check_caps queuing invalidate\n"); - queue_invalidate = 1; - ci->i_rdcache_revoking = ci->i_rdcache_gen; - } else { - dout("check_caps failed to invalidate pages\n"); - /* we failed to invalidate pages. check these - caps again later. */ - force_requeue = 1; - __cap_set_timeouts(mdsc, ci); + if (try_nonblocking_invalidate(inode) < 0) { + if (revoking & CEPH_CAP_FILE_CACHE) { + dout("check_caps queuing invalidate\n"); + queue_invalidate = 1; + ci->i_rdcache_revoking = ci->i_rdcache_gen; + } else { + dout("check_caps failed to invalidate pages\n"); + /* we failed to invalidate pages. check these + caps again later. */ + force_requeue = 1; + __cap_set_timeouts(mdsc, ci); + } } tried_invalidate = 1; goto retry_locked; @@ -2184,7 +2211,6 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, int revoked_rdcache = 0; int queue_invalidate = 0; int tried_invalidate = 0; - int ret; dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n", inode, cap, mds, seq, ceph_cap_string(newcaps)); @@ -2199,24 +2225,16 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, restart: if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) && !ci->i_wrbuffer_ref && !tried_invalidate) { - dout("CACHE invalidation\n"); - spin_unlock(&inode->i_lock); tried_invalidate = 1; - - ret = invalidate_mapping_pages(&inode->i_data, 0, -1); - spin_lock(&inode->i_lock); - if (ret < 0) { + if (try_nonblocking_invalidate(inode) == 0) { + revoked_rdcache = 1; + } else { /* there were locked pages.. invalidate later in a separate thread. */ if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { queue_invalidate = 1; ci->i_rdcache_revoking = ci->i_rdcache_gen; } - } else { - /* we successfully invalidated those pages */ - revoked_rdcache = 1; - ci->i_rdcache_gen = 0; - ci->i_rdcache_revoking = 0; } goto restart; } -- cgit v1.2.3 From e63dc5c780ba32d6d8b3662eecce2b8d96489b41 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Fri, 19 Feb 2010 00:07:01 +0000 Subject: ceph: remove page upon writeback completion if lost cache cap This page should have been removed earlier when the cache cap was revoked, but a writeback was in flight, so it was skipped. We truncate it here just as the writeback finishes, while it's still locked. 
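Condensed sketch of the added check (illustrative; the full hunk is in the diff below), assuming the writepages completion path where each acked page is still locked:

    unsigned issued = __ceph_caps_issued(ci, NULL);   /* caps held right now */

    /* ... for each page acked by this writeback reply ... */
    end_page_writeback(page);
    if ((issued & CEPH_CAP_FILE_CACHE) == 0)
            /* cache cap already revoked: drop the clean page while locked */
            generic_error_remove_page(inode->i_mapping, page);
    unlock_page(page);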
Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/addr.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 71f5ad1c1e26..25360d517d1b 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -514,6 +514,7 @@ static void writepages_finish(struct ceph_osd_request *req, u64 bytes = 0; struct ceph_client *client = ceph_inode_to_client(inode); long writeback_stat; + unsigned issued = __ceph_caps_issued(ci, NULL); /* parse reply */ replyhead = msg->front.iov_base; @@ -559,6 +560,16 @@ static void writepages_finish(struct ceph_osd_request *req, ceph_put_snap_context(snapc); dout("unlocking %d %p\n", i, page); end_page_writeback(page); + + /* + * We lost the cache cap, need to truncate the page before + * it is unlocked, otherwise we'd truncate it later in the + * page truncation thread, possibly losing some data that + * raced its way in + */ + if ((issued & CEPH_CAP_FILE_CACHE) == 0) + generic_error_remove_page(inode->i_mapping, page); + unlock_page(page); } dout("%p wrote+cleaned %d pages\n", inode, wrote); -- cgit v1.2.3 From c9af9fb68e01eb2c2165e1bc45cfeeed510c64e6 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Fri, 19 Feb 2010 00:10:11 +0000 Subject: ceph: don't truncate dirty pages in invalidate work thread Instead of truncating the whole range of pages, we skip those pages that are dirty or in the middle of writeback. Those pages will be cleared later when the writeback completes. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/inode.c | 46 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index d7d5d4923772..7abe1aed819b 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "super.h" #include "decode.h" @@ -1279,6 +1280,49 @@ void ceph_queue_invalidate(struct inode *inode) } } +/* + * invalidate any pages that are not dirty or under writeback. this + * includes pages that are clean and mapped. + */ +static void ceph_invalidate_nondirty_pages(struct address_space *mapping) +{ + struct pagevec pvec; + pgoff_t next = 0; + int i; + + pagevec_init(&pvec, 0); + while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { + for (i = 0; i < pagevec_count(&pvec); i++) { + struct page *page = pvec.pages[i]; + pgoff_t index; + int skip_page = + (PageDirty(page) || PageWriteback(page)); + + if (!skip_page) + skip_page = !trylock_page(page); + + /* + * We really shouldn't be looking at the ->index of an + * unlocked page. But we're not allowed to lock these + * pages. So we rely upon nobody altering the ->index + * of this (pinned-by-us) page. + */ + index = page->index; + if (index > next) + next = index; + next++; + + if (skip_page) + continue; + + generic_error_remove_page(mapping, page); + unlock_page(page); + } + pagevec_release(&pvec); + cond_resched(); + } +} + /* * Invalidate inode pages in a worker thread. (This can't be done * in the message handler context.) 
@@ -1305,7 +1349,7 @@ static void ceph_invalidate_work(struct work_struct *work) orig_gen = ci->i_rdcache_gen; spin_unlock(&inode->i_lock); - truncate_inode_pages(&inode->i_data, 0); + ceph_invalidate_nondirty_pages(inode->i_mapping); spin_lock(&inode->i_lock); if (orig_gen == ci->i_rdcache_gen) { -- cgit v1.2.3 From bcd2cbd10ce31c950a40c08d7f601f8ff23537b8 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Fri, 19 Feb 2010 00:12:21 +0000 Subject: ceph: cleanup redundant code in handle_cap_grant There is no state in local vars that requires us to loop after temporarily dropping i_lock. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/caps.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index e1e6df0f549e..289f6c65a17e 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2210,7 +2210,6 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, int writeback = 0; int revoked_rdcache = 0; int queue_invalidate = 0; - int tried_invalidate = 0; dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n", inode, cap, mds, seq, ceph_cap_string(newcaps)); @@ -2222,10 +2221,8 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant, * try to invalidate (once). (If there are dirty buffers, we * will invalidate _after_ writeback.) */ -restart: if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) && - !ci->i_wrbuffer_ref && !tried_invalidate) { - tried_invalidate = 1; + !ci->i_wrbuffer_ref) { if (try_nonblocking_invalidate(inode) == 0) { revoked_rdcache = 1; } else { @@ -2236,7 +2233,6 @@ restart: ci->i_rdcache_revoking = ci->i_rdcache_gen; } } - goto restart; } /* side effects now are allowed */ -- cgit v1.2.3 From 5b3a4db3e4009aff918abb1353eb3f4925393a7b Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 19 Feb 2010 21:43:23 -0800 Subject: ceph: fix up unexpected message handling Fix skipping of unexpected message types from osd, mon. Clean up pr_info and debug output. 
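The convention being fixed can be summarized with a small, hedged sketch of an alloc_msg hook (not the patch itself; the message types and ceph_msg_new() signature follow the kernel code of this era): allocate buffers for the types the connection expects, and set *skip for anything else so the messenger quietly discards it instead of treating it as an error.

    static struct ceph_msg *example_alloc_msg(struct ceph_connection *con,
                                              struct ceph_msg_header *hdr,
                                              int *skip)
    {
            int type = le16_to_cpu(hdr->type);
            int front_len = le32_to_cpu(hdr->front_len);
            struct ceph_msg *m = NULL;

            *skip = 0;
            switch (type) {
            case CEPH_MSG_OSD_MAP:          /* expected: allocate a buffer */
                    m = ceph_msg_new(type, front_len, 0, 0, NULL);
                    break;
            default:                        /* unexpected: log once, drop */
                    pr_info("alloc_msg unexpected type %d\n", type);
                    *skip = 1;
                    break;
            }
            return m;
    }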
Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 5 +++-- fs/ceph/mon_client.c | 14 +++++++++----- fs/ceph/osd_client.c | 41 +++++++++++++++++++++++++++++++---------- 3 files changed, 43 insertions(+), 17 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index ca2ad0e5bb28..fdda707aa137 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -1361,7 +1361,7 @@ static int read_partial_message(struct ceph_connection *con) con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip); if (skip) { /* skip this message */ - pr_err("alloc_msg returned NULL, skipping message\n"); + dout("alloc_msg returned NULL, skipping message\n"); con->in_base_pos = -front_len - middle_len - data_len - sizeof(m->footer); con->in_tag = CEPH_MSGR_TAG_READY; @@ -1370,7 +1370,8 @@ static int read_partial_message(struct ceph_connection *con) if (IS_ERR(con->in_msg)) { ret = PTR_ERR(con->in_msg); con->in_msg = NULL; - con->error_msg = "error allocating memory for incoming message"; + con->error_msg = + "error allocating memory for incoming message"; return ret; } m = con->in_msg; diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index 40d7d90bbed1..890597c09d43 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c @@ -763,7 +763,7 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, struct ceph_mon_client *monc = con->private; int type = le16_to_cpu(hdr->type); int front_len = le32_to_cpu(hdr->front_len); - struct ceph_msg *m; + struct ceph_msg *m = NULL; *skip = 0; @@ -777,13 +777,17 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, case CEPH_MSG_AUTH_REPLY: m = ceph_msgpool_get(&monc->msgpool_auth_reply, front_len); break; - default: - return NULL; + case CEPH_MSG_MON_MAP: + case CEPH_MSG_MDS_MAP: + case CEPH_MSG_OSD_MAP: + m = ceph_msg_new(type, front_len, 0, 0, NULL); + break; } - if (!m) + if (!m) { + pr_info("alloc_msg unknown type %d\n", type); *skip = 1; - + } return m; } diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index fa0f73703954..ffd819c5a5dd 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -1396,31 +1396,30 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) ceph_msg_put(msg); } -static struct ceph_msg *alloc_msg(struct ceph_connection *con, +/* + * lookup and return message for incoming reply + */ +static struct ceph_msg *get_reply(struct ceph_connection *con, struct ceph_msg_header *hdr, int *skip) { struct ceph_osd *osd = con->private; struct ceph_osd_client *osdc = osd->o_osdc; - int type = le16_to_cpu(hdr->type); - int front = le32_to_cpu(hdr->front_len); - int data_len = le32_to_cpu(hdr->data_len); struct ceph_msg *m; struct ceph_osd_request *req; + int front = le32_to_cpu(hdr->front_len); + int data_len = le32_to_cpu(hdr->data_len); u64 tid; int err; - *skip = 0; - if (type != CEPH_MSG_OSD_OPREPLY) - return NULL; - tid = le64_to_cpu(hdr->tid); mutex_lock(&osdc->request_mutex); req = __lookup_request(osdc, tid); if (!req) { *skip = 1; m = NULL; - dout("alloc_msg unknown tid %llu\n", tid); + pr_info("alloc_msg unknown tid %llu from osd%d\n", tid, + osd->o_osd); goto out; } m = __get_next_reply(con, req, front); @@ -1437,11 +1436,33 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con, m = ERR_PTR(err); } } + *skip = 0; out: mutex_unlock(&osdc->request_mutex); - return m; + +} + +static struct ceph_msg *alloc_msg(struct ceph_connection *con, + struct ceph_msg_header *hdr, + int *skip) +{ + struct ceph_osd *osd = con->private; + int type = le16_to_cpu(hdr->type); + int 
front = le32_to_cpu(hdr->front_len); + + switch (type) { + case CEPH_MSG_OSD_MAP: + return ceph_msg_new(type, front, 0, 0, NULL); + case CEPH_MSG_OSD_OPREPLY: + return get_reply(con, hdr, skip); + default: + pr_info("alloc_msg unexpected msg type %d from osd%d\n", type, + osd->o_osd); + *skip = 1; + return NULL; + } } /* -- cgit v1.2.3 From 4ce1e9adabbad8f2c45ceeeb6de56cc368484297 Mon Sep 17 00:00:00 2001 From: Alexander Beregalov Date: Mon, 22 Feb 2010 17:17:44 +0300 Subject: ceph: move dereference after NULL test Signed-off-by: Alexander Beregalov Signed-off-by: Sage Weil --- fs/ceph/addr.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 25360d517d1b..23bb0ceabe31 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -144,7 +144,7 @@ static int ceph_set_page_dirty(struct page *page) */ static void ceph_invalidatepage(struct page *page, unsigned long offset) { - struct inode *inode = page->mapping->host; + struct inode *inode; struct ceph_inode_info *ci; struct ceph_snap_context *snapc = (void *)page->private; @@ -153,6 +153,8 @@ static void ceph_invalidatepage(struct page *page, unsigned long offset) BUG_ON(!PagePrivate(page)); BUG_ON(!page->mapping); + inode = page->mapping->host; + /* * We can get non-dirty pages here due to races between * set_page_dirty and truncate_complete_page; just spit out a -- cgit v1.2.3 From a6369741c48815fedfce7072b7a9cd98b5bafd56 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 22 Feb 2010 13:59:00 -0800 Subject: ceph: fix comments, locking in destroy_inode The destroy_inode path needs no inode locks since there are no inode references. Update __ceph_remove_cap comment to reflect that it is called without cap->session->s_mutex in this case. Signed-off-by: Sage Weil --- fs/ceph/caps.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 289f6c65a17e..b6154ffe70df 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -856,8 +856,8 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci) } /* - * caller should hold i_lock, and session s_mutex. - * returns true if this is the last cap. if so, caller should iput. + * caller should hold i_lock. + * caller will not hold session s_mutex if called from destroy_inode. */ void __ceph_remove_cap(struct ceph_cap *cap) { @@ -974,15 +974,14 @@ static int send_cap_msg(struct ceph_mds_session *session, } /* - * Queue cap releases when an inode is dropped from our - * cache. + * Queue cap releases when an inode is dropped from our cache. Since + * inode is about to be destroyed, there is no need for i_lock. */ void ceph_queue_caps_release(struct inode *inode) { struct ceph_inode_info *ci = ceph_inode(inode); struct rb_node *p; - spin_lock(&inode->i_lock); p = rb_first(&ci->i_caps); while (p) { struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node); @@ -1024,9 +1023,7 @@ void ceph_queue_caps_release(struct inode *inode) spin_unlock(&session->s_cap_lock); p = rb_next(p); __ceph_remove_cap(cap); - } - spin_unlock(&inode->i_lock); } /* -- cgit v1.2.3 From 2600d2dd5085ab6fb09540226138a60055abf335 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 22 Feb 2010 15:12:16 -0800 Subject: ceph: drop messages on unregistered mds sessions; cleanup Verify the mds session is currently registered before handling incoming messages. Clean up message handlers to pull mds out of session->s_mds instead of less trustworthy src field. Clean up con_{get,put} debug output. 
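In short, every handler now trusts only state the client itself tracks: the originating mds comes from session->s_mds, and dispatch() refuses messages for sessions no longer in the table. A condensed sketch of that guard (illustrative; the complete version is in the diff below):

    mutex_lock(&mdsc->mutex);
    if (__verify_registered_session(mdsc, s) < 0) {
            mutex_unlock(&mdsc->mutex);
            goto out;                       /* stale session: drop message */
    }
    mutex_unlock(&mdsc->mutex);
    /* ... dispatch on msg type, using s->s_mds instead of hdr.src ... */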
Signed-off-by: Sage Weil --- fs/ceph/caps.c | 2 +- fs/ceph/mds_client.c | 85 ++++++++++++++++++++++++++-------------------------- fs/ceph/snap.c | 17 ++--------- fs/ceph/super.h | 1 + 4 files changed, 46 insertions(+), 59 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index b6154ffe70df..bb846164addc 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2600,7 +2600,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, struct inode *inode; struct ceph_cap *cap; struct ceph_mds_caps *h; - int mds = le64_to_cpu(msg->hdr.src.name.num); + int mds = session->s_mds; int op; u32 seq; struct ceph_vino vino; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 124c0c17a14a..4d00ea2af000 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -309,6 +309,15 @@ static bool __have_session(struct ceph_mds_client *mdsc, int mds) return mdsc->sessions[mds]; } +static int __verify_registered_session(struct ceph_mds_client *mdsc, + struct ceph_mds_session *s) +{ + if (s->s_mds >= mdsc->max_sessions || + mdsc->sessions[s->s_mds] != s) + return -ENOENT; + return 0; +} + /* * create+register a new session for given mds. * called under mdsc->mutex. @@ -382,10 +391,11 @@ fail_realloc: /* * called under mdsc->mutex */ -static void unregister_session(struct ceph_mds_client *mdsc, +static void __unregister_session(struct ceph_mds_client *mdsc, struct ceph_mds_session *s) { - dout("unregister_session mds%d %p\n", s->s_mds, s); + dout("__unregister_session mds%d %p\n", s->s_mds, s); + BUG_ON(mdsc->sessions[s->s_mds] != s); mdsc->sessions[s->s_mds] = NULL; ceph_con_close(&s->s_con); ceph_put_mds_session(s); @@ -1740,10 +1750,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ u64 tid; int err, result; - int mds; + int mds = session->s_mds; - if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS) - return; if (msg->front.iov_len < sizeof(*head)) { pr_err("mdsc_handle_reply got corrupt (short) reply\n"); ceph_msg_dump(msg); @@ -1760,7 +1768,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) return; } dout("handle_reply %p\n", req); - mds = le64_to_cpu(msg->hdr.src.name.num); /* correct session? */ if (!req->r_session && req->r_session != session) { @@ -1884,7 +1891,9 @@ out: /* * handle mds notification that our request has been forwarded. */ -static void handle_forward(struct ceph_mds_client *mdsc, struct ceph_msg *msg) +static void handle_forward(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session, + struct ceph_msg *msg) { struct ceph_mds_request *req; u64 tid; @@ -1894,11 +1903,7 @@ static void handle_forward(struct ceph_mds_client *mdsc, struct ceph_msg *msg) int err = -EINVAL; void *p = msg->front.iov_base; void *end = p + msg->front.iov_len; - int from_mds, state; - - if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS) - goto bad; - from_mds = le64_to_cpu(msg->hdr.src.name.num); + int state; ceph_decode_need(&p, end, sizeof(u64)+2*sizeof(u32), bad); tid = ceph_decode_64(&p); @@ -1915,6 +1920,9 @@ static void handle_forward(struct ceph_mds_client *mdsc, struct ceph_msg *msg) goto out; /* dup reply? 
*/ } + if (next_mds >= mdsc->max_sessions) + goto out; + state = mdsc->sessions[next_mds]->s_state; if (fwd_seq <= req->r_num_fwd) { dout("forward %llu to mds%d - old seq %d <= %d\n", @@ -1945,14 +1953,10 @@ static void handle_session(struct ceph_mds_session *session, struct ceph_mds_client *mdsc = session->s_mdsc; u32 op; u64 seq; - int mds; + int mds = session->s_mds; struct ceph_mds_session_head *h = msg->front.iov_base; int wake = 0; - if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS) - return; - mds = le64_to_cpu(msg->hdr.src.name.num); - /* decode */ if (msg->front.iov_len != sizeof(*h)) goto bad; @@ -1960,6 +1964,8 @@ static void handle_session(struct ceph_mds_session *session, seq = le64_to_cpu(h->seq); mutex_lock(&mdsc->mutex); + if (op == CEPH_SESSION_CLOSE) + __unregister_session(mdsc, session); /* FIXME: this ttl calculation is generous */ session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; mutex_unlock(&mdsc->mutex); @@ -1990,7 +1996,6 @@ static void handle_session(struct ceph_mds_session *session, break; case CEPH_SESSION_CLOSE: - unregister_session(mdsc, session); remove_session_caps(session); wake = 1; /* for good measure */ complete(&mdsc->session_close_waiters); @@ -2269,7 +2274,7 @@ static void check_new_map(struct ceph_mds_client *mdsc, /* the session never opened, just close it * out now */ __wake_requests(mdsc, &s->s_waiting); - unregister_session(mdsc, s); + __unregister_session(mdsc, s); } else { /* just close it */ mutex_unlock(&mdsc->mutex); @@ -2329,24 +2334,22 @@ void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry) di->lease_session = NULL; } -static void handle_lease(struct ceph_mds_client *mdsc, struct ceph_msg *msg) +static void handle_lease(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session, + struct ceph_msg *msg) { struct super_block *sb = mdsc->client->sb; struct inode *inode; - struct ceph_mds_session *session; struct ceph_inode_info *ci; struct dentry *parent, *dentry; struct ceph_dentry_info *di; - int mds; + int mds = session->s_mds; struct ceph_mds_lease *h = msg->front.iov_base; struct ceph_vino vino; int mask; struct qstr dname; int release = 0; - if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS) - return; - mds = le64_to_cpu(msg->hdr.src.name.num); dout("handle_lease from mds%d\n", mds); /* decode */ @@ -2360,15 +2363,6 @@ static void handle_lease(struct ceph_mds_client *mdsc, struct ceph_msg *msg) if (dname.len != get_unaligned_le32(h+1)) goto bad; - /* find session */ - mutex_lock(&mdsc->mutex); - session = __ceph_lookup_mds_session(mdsc, mds); - mutex_unlock(&mdsc->mutex); - if (!session) { - pr_err("handle_lease got lease but no session mds%d\n", mds); - return; - } - mutex_lock(&session->s_mutex); session->s_seq++; @@ -2437,7 +2431,6 @@ release: out: iput(inode); mutex_unlock(&session->s_mutex); - ceph_put_mds_session(session); return; bad: @@ -2794,7 +2787,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) for (i = 0; i < mdsc->max_sessions; i++) { if (mdsc->sessions[i]) { session = get_session(mdsc->sessions[i]); - unregister_session(mdsc, session); + __unregister_session(mdsc, session); mutex_unlock(&mdsc->mutex); mutex_lock(&session->s_mutex); remove_session_caps(session); @@ -2891,8 +2884,7 @@ static struct ceph_connection *con_get(struct ceph_connection *con) struct ceph_mds_session *s = con->private; if (get_session(s)) { - dout("mdsc con_get %p %d -> %d\n", s, - atomic_read(&s->s_ref) - 1, atomic_read(&s->s_ref)); + dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref)); return 
con; } dout("mdsc con_get %p FAIL\n", s); @@ -2903,9 +2895,8 @@ static void con_put(struct ceph_connection *con) { struct ceph_mds_session *s = con->private; - dout("mdsc con_put %p %d -> %d\n", s, atomic_read(&s->s_ref), - atomic_read(&s->s_ref) - 1); ceph_put_mds_session(s); + dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref)); } /* @@ -2926,6 +2917,13 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) struct ceph_mds_client *mdsc = s->s_mdsc; int type = le16_to_cpu(msg->hdr.type); + mutex_lock(&mdsc->mutex); + if (__verify_registered_session(mdsc, s) < 0) { + mutex_unlock(&mdsc->mutex); + goto out; + } + mutex_unlock(&mdsc->mutex); + switch (type) { case CEPH_MSG_MDS_MAP: ceph_mdsc_handle_map(mdsc, msg); @@ -2937,22 +2935,23 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) handle_reply(s, msg); break; case CEPH_MSG_CLIENT_REQUEST_FORWARD: - handle_forward(mdsc, msg); + handle_forward(mdsc, s, msg); break; case CEPH_MSG_CLIENT_CAPS: ceph_handle_caps(s, msg); break; case CEPH_MSG_CLIENT_SNAP: - ceph_handle_snap(mdsc, msg); + ceph_handle_snap(mdsc, s, msg); break; case CEPH_MSG_CLIENT_LEASE: - handle_lease(mdsc, msg); + handle_lease(mdsc, s, msg); break; default: pr_err("received unknown message type %d %s\n", type, ceph_msg_type_name(type)); } +out: ceph_msg_put(msg); } diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 49d0c4c59d81..bf2a5f3846a4 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -713,11 +713,11 @@ static void flush_snaps(struct ceph_mds_client *mdsc) * directory into another realm. */ void ceph_handle_snap(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session, struct ceph_msg *msg) { struct super_block *sb = mdsc->client->sb; - struct ceph_mds_session *session; - int mds; + int mds = session->s_mds; u64 split; int op; int trace_len; @@ -730,10 +730,6 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc, int i; int locked_rwsem = 0; - if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS) - return; - mds = le64_to_cpu(msg->hdr.src.name.num); - /* decode */ if (msg->front.iov_len < sizeof(*h)) goto bad; @@ -749,15 +745,6 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc, dout("handle_snap from mds%d op %s split %llx tracelen %d\n", mds, ceph_snap_op_name(op), split, trace_len); - /* find session */ - mutex_lock(&mdsc->mutex); - session = __ceph_lookup_mds_session(mdsc, mds); - mutex_unlock(&mdsc->mutex); - if (!session) { - dout("WTF, got snap but no session for mds%d\n", mds); - return; - } - mutex_lock(&session->s_mutex); session->s_seq++; mutex_unlock(&session->s_mutex); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 384f0e2e7c68..ff7aaa32736c 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -707,6 +707,7 @@ extern void ceph_put_snap_realm(struct ceph_mds_client *mdsc, extern int ceph_update_snap_trace(struct ceph_mds_client *m, void *p, void *e, bool deletion); extern void ceph_handle_snap(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session, struct ceph_msg *msg); extern void ceph_queue_cap_snap(struct ceph_inode_info *ci, struct ceph_snap_context *snapc); -- cgit v1.2.3 From a1ea787c7b6ec036d169d84e08cca7b6e399ba70 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 23 Feb 2010 14:02:44 -0800 Subject: ceph: fix client_request_forward decoding The tid is in the message header, not body. Broken since 6df058c0. No need to look at next mds session; just mark the request and be done. (The old error path was broken too, but now it's gone.) 
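A condensed sketch of the corrected decode (illustrative; the hunk below is authoritative): the tid is taken from the header up front, and only the forward-specific fields are pulled out of the front payload.

    u64 tid = le64_to_cpu(msg->hdr.tid);    /* header, not message body */
    void *p = msg->front.iov_base;
    void *end = p + msg->front.iov_len;

    ceph_decode_need(&p, end, 2*sizeof(u32), bad);
    next_mds = ceph_decode_32(&p);
    fwd_seq = ceph_decode_32(&p);
    must_resend = ceph_decode_8(&p);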
Signed-off-by: Sage Weil --- fs/ceph/mds_client.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 4d00ea2af000..bec8a7aeb300 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1896,17 +1896,15 @@ static void handle_forward(struct ceph_mds_client *mdsc, struct ceph_msg *msg) { struct ceph_mds_request *req; - u64 tid; + u64 tid = le64_to_cpu(msg->hdr.tid); u32 next_mds; u32 fwd_seq; u8 must_resend; int err = -EINVAL; void *p = msg->front.iov_base; void *end = p + msg->front.iov_len; - int state; - ceph_decode_need(&p, end, sizeof(u64)+2*sizeof(u32), bad); - tid = ceph_decode_64(&p); + ceph_decode_need(&p, end, 2*sizeof(u32), bad); next_mds = ceph_decode_32(&p); fwd_seq = ceph_decode_32(&p); must_resend = ceph_decode_8(&p); @@ -1920,10 +1918,6 @@ static void handle_forward(struct ceph_mds_client *mdsc, goto out; /* dup reply? */ } - if (next_mds >= mdsc->max_sessions) - goto out; - - state = mdsc->sessions[next_mds]->s_state; if (fwd_seq <= req->r_num_fwd) { dout("forward %llu to mds%d - old seq %d <= %d\n", tid, next_mds, req->r_num_fwd, fwd_seq); -- cgit v1.2.3 From 88d892a37fc231ab2aa3b1c40ca9d67224616594 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Tue, 23 Feb 2010 18:16:23 +0000 Subject: ceph: don't clobber write return value when using O_SYNC Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/file.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 2c4ae4441cab..88932c9145e9 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -807,7 +807,7 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov, struct ceph_osd_client *osdc = &ceph_client(inode->i_sb)->osdc; loff_t endoff = pos + iov->iov_len; int got = 0; - int ret; + int ret, err; if (ceph_snap(inode) != CEPH_NOSNAP) return -EROFS; @@ -838,9 +838,12 @@ retry_snap: if ((ret >= 0 || ret == -EIOCBQUEUED) && ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) - || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) - ret = vfs_fsync_range(file, file->f_path.dentry, + || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) { + err = vfs_fsync_range(file, file->f_path.dentry, pos, pos + ret - 1, 1); + if (err < 0) + ret = err; + } } if (ret >= 0) { spin_lock(&inode->i_lock); -- cgit v1.2.3 From bcf59e2c4dea780e4abf48d5e673f5d79f9ee064 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 23 Feb 2010 15:04:23 +0300 Subject: uwb: remove duplicate cpu_to_le16() These parameters should be passed as cpu endian because we change it to little endian inside usb_control_msg(). On x86 cpu_to_le16() doesn't do anything so either way works but I think the original code would break on big endian systems. I removed the masks as well because that usb_control_msg() parameters are __u16 so we already only use the lower bits. 
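A small user-space demonstration of the double-conversion problem (purely illustrative, not driver code; bswap_16() stands in for what cpu_to_le16() does on a big-endian CPU). usb_control_msg() encodes the numeric wValue and wIndex it is given as little endian itself, so the device ends up decoding exactly the value passed in; pre-swapping therefore hands it the wrong number on big-endian machines:

    #include <stdio.h>
    #include <stdint.h>
    #include <byteswap.h>

    int main(void)
    {
            uint16_t addr_lo = 0x1234;      /* low 16 bits of memory_address */

            /* passing the plain value: device decodes 0x1234 (correct) */
            uint16_t device_sees_fixed = addr_lo;

            /* pre-swapping on big-endian: cpu_to_le16(0x1234) == 0x3412,
             * and that is the number the device will decode (wrong) */
            uint16_t device_sees_old = bswap_16(addr_lo);

            printf("fixed code: device decodes 0x%04x\n", device_sees_fixed);
            printf("old code (big-endian host): device decodes 0x%04x\n",
                   device_sees_old);
            return 0;
    }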
Signed-off-by: Dan Carpenter Signed-off-by: David Vrabel --- drivers/uwb/i1480/dfu/usb.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c index a6a93755627e..a99e211a1b87 100644 --- a/drivers/uwb/i1480/dfu/usb.c +++ b/drivers/uwb/i1480/dfu/usb.c @@ -120,8 +120,7 @@ int i1480_usb_write(struct i1480 *i1480, u32 memory_address, result = usb_control_msg( i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0), 0xf0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - cpu_to_le16(memory_address & 0xffff), - cpu_to_le16((memory_address >> 16) & 0xffff), + memory_address, (memory_address >> 16), i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */); if (result < 0) break; @@ -166,8 +165,7 @@ int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size) result = usb_control_msg( i1480_usb->usb_dev, usb_rcvctrlpipe(i1480_usb->usb_dev, 0), 0xf0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - cpu_to_le16(itr_addr & 0xffff), - cpu_to_le16((itr_addr >> 16) & 0xffff), + itr_addr, (itr_addr >> 16), i1480->cmd_buf + itr, itr_size, 100 /* FIXME: arbitrary */); if (result < 0) { -- cgit v1.2.3 From 161fd65ac934608345aed35226fc889ea3b0b500 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 25 Feb 2010 12:38:57 -0800 Subject: ceph: invalidate_authorizer without con->mutex held This fixes lock ABBA inversion, as the ->invalidate_authorizer() op may need to take a lock (or even call back into the messenger). Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index fdda707aa137..9ea7b763c8dc 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -1853,14 +1853,6 @@ static void ceph_fault(struct ceph_connection *con) con->in_msg = NULL; } - /* - * in case we faulted due to authentication, invalidate our - * current tickets so that we can get new ones. - */ - if (con->auth_retry && con->ops->invalidate_authorizer) { - dout("calling invalidate_authorizer()\n"); - con->ops->invalidate_authorizer(con); - } /* If there are no messages in the queue, place the connection * in a STANDBY state (i.e., don't try to reconnect just yet). */ @@ -1890,6 +1882,15 @@ static void ceph_fault(struct ceph_connection *con) out_unlock: mutex_unlock(&con->mutex); out: + /* + * in case we faulted due to authentication, invalidate our + * current tickets so that we can get new ones. + */ + if (con->auth_retry && con->ops->invalidate_authorizer) { + dout("calling invalidate_authorizer()\n"); + con->ops->invalidate_authorizer(con); + } + if (con->ops->fault) con->ops->fault(con); } -- cgit v1.2.3 From e80a52d14f868059e8ec790c9fae88cdb8a1df98 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 25 Feb 2010 12:40:45 -0800 Subject: ceph: fix connection fault STANDBY check Move any out_sent messages to out_queue _before_ checking if out_queue is empty and going to STANDBY, or else we may drop something that was never acked. And clean up the code a bit (less goto). 
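A toy model of the reordering (plain counters stand in for the out_sent and out_queue lists; the real code splices list_heads under con->mutex):

#include <stdbool.h>
#include <stdio.h>

struct toy_con {
	int out_sent;		/* sent but not yet acked */
	int out_queue;		/* queued, not yet sent */
	bool standby;
};

static void fault(struct toy_con *con)
{
	/* requeue unacked messages *before* the emptiness check */
	con->out_queue += con->out_sent;
	con->out_sent = 0;

	if (con->out_queue == 0)
		con->standby = true;	/* nothing pending: park the connection */
	else
		printf("retrying, %d message(s) still queued\n", con->out_queue);
}

int main(void)
{
	struct toy_con con = { .out_sent = 2, .out_queue = 0 };

	fault(&con);
	printf("standby=%d\n", con.standby);	/* 0: unacked msgs not dropped */
	return 0;
}

With the old ordering the emptiness check ran first, so a connection with only unacked messages went STANDBY and those messages could be dropped without ever being acked.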
Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 9ea7b763c8dc..0ddc2c75f6b4 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -1853,32 +1853,27 @@ static void ceph_fault(struct ceph_connection *con) con->in_msg = NULL; } + /* Requeue anything that hasn't been acked */ + list_splice_init(&con->out_sent, &con->out_queue); /* If there are no messages in the queue, place the connection * in a STANDBY state (i.e., don't try to reconnect just yet). */ if (list_empty(&con->out_queue) && !con->out_keepalive_pending) { dout("fault setting STANDBY\n"); set_bit(STANDBY, &con->state); - mutex_unlock(&con->mutex); - goto out; + } else { + /* retry after a delay. */ + if (con->delay == 0) + con->delay = BASE_DELAY_INTERVAL; + else if (con->delay < MAX_DELAY_INTERVAL) + con->delay *= 2; + dout("fault queueing %p delay %lu\n", con, con->delay); + con->ops->get(con); + if (queue_delayed_work(ceph_msgr_wq, &con->work, + round_jiffies_relative(con->delay)) == 0) + con->ops->put(con); } - /* Requeue anything that hasn't been acked, and retry after a - * delay. */ - list_splice_init(&con->out_sent, &con->out_queue); - - if (con->delay == 0) - con->delay = BASE_DELAY_INTERVAL; - else if (con->delay < MAX_DELAY_INTERVAL) - con->delay *= 2; - - /* explicitly schedule work to try to reconnect again later. */ - dout("fault queueing %p delay %lu\n", con, con->delay); - con->ops->get(con); - if (queue_delayed_work(ceph_msgr_wq, &con->work, - round_jiffies_relative(con->delay)) == 0) - con->ops->put(con); - out_unlock: mutex_unlock(&con->mutex); out: -- cgit v1.2.3 From c99eb1c7263a44e63161a041a778b345b5cf0b6a Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 26 Feb 2010 09:37:33 -0800 Subject: ceph: remove fragile __map_osds optimization We used to try to avoid freeing and then reallocating the osd struct. 
This is a bit fragile due to potential interactions with other references (beyond o_requests), and may be the cause of this crash: [120633.442358] BUG: unable to handle kernel NULL pointer dereference at (null) [120633.443292] IP: [] rb_erase+0x11d/0x277 [120633.443292] PGD f7ff3067 PUD f7f53067 PMD 0 [120633.443292] Oops: 0000 [#1] PREEMPT SMP [120633.443292] last sysfs file: /sys/kernel/uevent_seqnum [120633.443292] CPU 1 [120633.443292] Modules linked in: ceph fan ac battery psmouse ehci_hcd ide_pci_generic ohci_hcd thermal processor button [120633.443292] Pid: 3023, comm: ceph-msgr/1 Not tainted 2.6.32-rc2 #12 H8SSL [120633.443292] RIP: 0010:[] [] rb_erase+0x11d/0x277 [120633.443292] RSP: 0018:ffff8800f7b13a50 EFLAGS: 00010246 [120633.443292] RAX: ffff880022907819 RBX: ffff880022907818 RCX: 0000000000000000 [120633.443292] RDX: ffff8800f7b13a80 RSI: ffff8800f587eb48 RDI: 0000000000000000 [120633.443292] RBP: ffff8800f7b13a60 R08: 0000000000000000 R09: 0000000000000004 [120633.443292] R10: 0000000000000000 R11: ffff8800c4441000 R12: ffff8800f587eb48 [120633.443292] R13: ffff8800f58eaa00 R14: ffff8800f413c000 R15: 0000000000000001 [120633.443292] FS: 00007fbef6e226e0(0000) GS:ffff880009200000(0000) knlGS:0000000000000000 [120633.443292] CS: 0010 DS: 0018 ES: 0018 CR0: 000000008005003b [120633.443292] CR2: 0000000000000000 CR3: 00000000f7c53000 CR4: 00000000000006e0 [120633.443292] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [120633.443292] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 [120633.443292] Process ceph-msgr/1 (pid: 3023, threadinfo ffff8800f7b12000, task ffff8800f5858b40) [120633.443292] Stack: [120633.443292] ffff8800f413c000 ffff8800f587e9c0 ffff8800f7b13a80 ffffffffa0098a86 [120633.443292] <0> 00000000000006f1 0000000000000000 ffff8800f7b13af0 ffffffffa009959b [120633.443292] <0> ffff8800f413c000 ffff880022a68400 ffff880022a68400 ffff8800f587e9c0 [120633.443292] Call Trace: [120633.443292] [] __remove_osd+0x4d/0xbc [ceph] [120633.443292] [] __map_osds+0x199/0x4fa [ceph] [120633.443292] [] ? __send_request+0xf8/0x186 [ceph] [120633.443292] [] kick_requests+0x169/0x3cb [ceph] [120633.443292] [] ceph_osdc_handle_map+0x370/0x522 [ceph] Since we're probably screwed anyway if a small kmalloc is failing, don't bother with trying to be clever here. 
Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 21 ++++----------------- 1 file changed, 4 insertions(+), 17 deletions(-) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index ffd819c5a5dd..3a631f27cc9e 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -615,7 +615,6 @@ static int __map_osds(struct ceph_osd_client *osdc, struct ceph_pg pgid; int o = -1; int err; - struct ceph_osd *newosd = NULL; dout("map_osds %p tid %lld\n", req, req->r_tid); err = ceph_calc_object_layout(&reqhead->layout, req->r_oid, @@ -639,25 +638,15 @@ static int __map_osds(struct ceph_osd_client *osdc, if (req->r_osd) { __cancel_request(req); list_del_init(&req->r_osd_item); - if (list_empty(&req->r_osd->o_requests)) { - /* try to re-use r_osd if possible */ - newosd = get_osd(req->r_osd); - __remove_osd(osdc, newosd); - } req->r_osd = NULL; } req->r_osd = __lookup_osd(osdc, o); if (!req->r_osd && o >= 0) { - if (newosd) { - req->r_osd = newosd; - newosd = NULL; - } else { - err = -ENOMEM; - req->r_osd = create_osd(osdc); - if (!req->r_osd) - goto out; - } + err = -ENOMEM; + req->r_osd = create_osd(osdc); + if (!req->r_osd) + goto out; dout("map_osds osd %p is osd%d\n", req->r_osd, o); req->r_osd->o_osd = o; @@ -674,8 +663,6 @@ static int __map_osds(struct ceph_osd_client *osdc, err = 1; /* osd changed */ out: - if (newosd) - put_osd(newosd); return err; } -- cgit v1.2.3 From 080af17e9c6360c5a835528e8de3141a46273ed2 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 25 Feb 2010 16:40:07 -0800 Subject: ceph: remove bogus mds forward warning The must_resend flag is always true, not false. In any case, we can just ignore it anyway. Signed-off-by: Sage Weil --- fs/ceph/mds_client.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index bec8a7aeb300..a2600101ec22 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1899,7 +1899,6 @@ static void handle_forward(struct ceph_mds_client *mdsc, u64 tid = le64_to_cpu(msg->hdr.tid); u32 next_mds; u32 fwd_seq; - u8 must_resend; int err = -EINVAL; void *p = msg->front.iov_base; void *end = p + msg->front.iov_len; @@ -1907,14 +1906,11 @@ static void handle_forward(struct ceph_mds_client *mdsc, ceph_decode_need(&p, end, 2*sizeof(u32), bad); next_mds = ceph_decode_32(&p); fwd_seq = ceph_decode_32(&p); - must_resend = ceph_decode_8(&p); - - WARN_ON(must_resend); /* shouldn't happen. */ mutex_lock(&mdsc->mutex); req = __lookup_request(mdsc, tid); if (!req) { - dout("forward %llu dne\n", tid); + dout("forward %llu to mds%d - req dne\n", tid, next_mds); goto out; /* dup reply? */ } -- cgit v1.2.3 From 1679f876a641d209e7b22e43ebda0693c71003cf Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 26 Feb 2010 13:55:51 -0800 Subject: ceph: reset bits on connection close Clear LOSSYTX bit, so that if/when we reconnect, said reconnect will retry on failure. Clear _PENDING bits too, to avoid polluting subsequent connection state. Drop unused REGISTERED bit. 
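The state scrubbing boils down to clearing a handful of flag bits at close time; a minimal sketch with made-up flag values (the messenger itself uses set_bit()/clear_bit() on con->state):

#include <stdio.h>

enum {
	F_LOSSYTX		= 1 << 0,
	F_STANDBY		= 1 << 1,
	F_KEEPALIVE_PENDING	= 1 << 2,
	F_WRITE_PENDING		= 1 << 3,
	F_CLOSED		= 1 << 4,
};

static unsigned int con_close(unsigned int state)
{
	state |= F_CLOSED;
	/* drop per-connection state so a later reconnect starts clean */
	state &= ~(unsigned int)(F_LOSSYTX | F_STANDBY |
				 F_KEEPALIVE_PENDING | F_WRITE_PENDING);
	return state;
}

int main(void)
{
	unsigned int state = F_LOSSYTX | F_WRITE_PENDING;

	printf("state after close: 0x%x\n", con_close(state));	/* only F_CLOSED */
	return 0;
}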
Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 3 +++ fs/ceph/messenger.h | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index 0ddc2c75f6b4..bf4590c77cf6 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -342,6 +342,9 @@ void ceph_con_close(struct ceph_connection *con) dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr)); set_bit(CLOSED, &con->state); /* in case there's queued work */ clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */ + clear_bit(LOSSYTX, &con->state); /* so we retry next connect */ + clear_bit(KEEPALIVE_PENDING, &con->state); + clear_bit(WRITE_PENDING, &con->state); mutex_lock(&con->mutex); reset_connection(con); cancel_delayed_work(&con->work); diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h index c9735378be3f..4caaa5911110 100644 --- a/fs/ceph/messenger.h +++ b/fs/ceph/messenger.h @@ -119,7 +119,6 @@ struct ceph_msg_pos { * state with the peer. */ #define CLOSED 10 /* we've closed the connection */ #define SOCK_CLOSED 11 /* socket state changed to closed */ -#define REGISTERED 12 /* connection appears in con_tree */ #define OPENING 13 /* open connection w/ (possibly new) peer */ #define DEAD 14 /* dead, about to kfree */ -- cgit v1.2.3 From c16e786927b977cb880873214bbd815e8d5ec4ba Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 1 Mar 2010 13:02:00 -0800 Subject: ceph: use single osd op reply msg Use a single ceph_msg for the osd reply, even when we are getting multiple replies. Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 138 +++++++++++++++++---------------------------------- fs/ceph/osd_client.h | 5 +- 2 files changed, 46 insertions(+), 97 deletions(-) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index 3a631f27cc9e..ffe1f4064ccd 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -13,7 +13,8 @@ #include "decode.h" #include "auth.h" -#define OSD_REPLY_RESERVE_FRONT_LEN 512 +#define OSD_OP_FRONT_LEN 4096 +#define OSD_OPREPLY_FRONT_LEN 512 const static struct ceph_connection_operations osd_con_ops; @@ -75,17 +76,6 @@ static void calc_layout(struct ceph_osd_client *osdc, req->r_oid, req->r_oid_len, objoff, objlen, req->r_num_pages); } -static void remove_replies(struct ceph_osd_request *req) -{ - int i; - int max = ARRAY_SIZE(req->replies); - - for (i=0; ireplies[i]) - ceph_msg_put(req->replies[i]); - } -} - /* * requests */ @@ -99,7 +89,6 @@ void ceph_osdc_release_request(struct kref *kref) ceph_msg_put(req->r_request); if (req->r_reply) ceph_msg_put(req->r_reply); - remove_replies(req); if (req->r_con_filling_msg) { dout("release_request revoking pages %p from con %p\n", req->r_pages, req->r_con_filling_msg); @@ -117,60 +106,6 @@ void ceph_osdc_release_request(struct kref *kref) kfree(req); } -static int alloc_replies(struct ceph_osd_request *req, int num_reply) -{ - int i; - int max = ARRAY_SIZE(req->replies); - - BUG_ON(num_reply > max); - - for (i=0; ireplies[i] = ceph_msg_new(0, OSD_REPLY_RESERVE_FRONT_LEN, 0, 0, NULL); - if (IS_ERR(req->replies[i])) { - int j; - int err = PTR_ERR(req->replies[i]); - for (j = 0; j<=i; j++) { - ceph_msg_put(req->replies[j]); - } - return err; - } - } - - for (; ireplies[i] = NULL; - } - - req->cur_reply = 0; - - return 0; -} - -static struct ceph_msg *__get_next_reply(struct ceph_connection *con, - struct ceph_osd_request *req, - int front_len) -{ - struct ceph_msg *reply; - if (req->r_con_filling_msg) { - dout("revoking reply msg %p from old con %p\n", req->r_reply, - 
req->r_con_filling_msg); - ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply); - ceph_con_put(req->r_con_filling_msg); - req->cur_reply = 0; - } - reply = req->replies[req->cur_reply]; - if (!reply || front_len > OSD_REPLY_RESERVE_FRONT_LEN) { - /* maybe we can allocate it now? */ - reply = ceph_msg_new(0, front_len, 0, 0, NULL); - if (!reply || IS_ERR(reply)) { - pr_err(" reply alloc failed, front_len=%d\n", front_len); - return ERR_PTR(-ENOMEM); - } - } - req->r_con_filling_msg = ceph_con_get(con); - req->r_reply = ceph_msg_get(reply); /* for duration of read over socket */ - return ceph_msg_get(reply); -} - /* * build new request AND message, calculate layout, and adjust file * extent as needed. @@ -201,7 +136,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, void *p; int num_op = 1 + do_sync; size_t msg_size = sizeof(*head) + num_op*sizeof(*op); - int err, i; + int i; if (use_mempool) { req = mempool_alloc(osdc->req_mempool, GFP_NOFS); @@ -212,13 +147,6 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, if (req == NULL) return ERR_PTR(-ENOMEM); - err = alloc_replies(req, num_reply); - if (err) { - ceph_osdc_put_request(req); - return ERR_PTR(-ENOMEM); - } - req->r_num_prealloc_reply = num_reply; - req->r_osdc = osdc; req->r_mempool = use_mempool; kref_init(&req->r_kref); @@ -229,7 +157,19 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0); - /* create message; allow space for oid */ + /* create reply message */ + if (use_mempool) + msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0); + else + msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, + OSD_OPREPLY_FRONT_LEN, 0, 0, NULL); + if (IS_ERR(msg)) { + ceph_osdc_put_request(req); + return ERR_PTR(PTR_ERR(msg)); + } + req->r_reply = msg; + + /* create request message; allow space for oid */ msg_size += 40; if (snapc) msg_size += sizeof(u64) * snapc->num_snaps; @@ -819,21 +759,11 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, * avoid a (safe but slower) revoke later. 
*/ if (req->r_con_filling_msg == con && req->r_reply == msg) { - dout(" got pages, dropping con_filling_msg ref %p\n", con); + dout(" dropping con_filling_msg ref %p\n", con); req->r_con_filling_msg = NULL; ceph_con_put(con); } - if (req->r_reply) { - /* - * once we see the message has been received, we don't - * need a ref (which is only needed for revoking - * pages) - */ - ceph_msg_put(req->r_reply); - req->r_reply = NULL; - } - if (!req->r_got_reply) { unsigned bytes; @@ -1249,11 +1179,17 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) if (!osdc->req_mempool) goto out; - err = ceph_msgpool_init(&osdc->msgpool_op, 4096, 10, true); + err = ceph_msgpool_init(&osdc->msgpool_op, OSD_OP_FRONT_LEN, 10, true); if (err < 0) goto out_mempool; + err = ceph_msgpool_init(&osdc->msgpool_op_reply, + OSD_OPREPLY_FRONT_LEN, 10, true); + if (err < 0) + goto out_msgpool; return 0; +out_msgpool: + ceph_msgpool_destroy(&osdc->msgpool_op); out_mempool: mempool_destroy(osdc->req_mempool); out: @@ -1271,6 +1207,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc) remove_old_osds(osdc, 1); mempool_destroy(osdc->req_mempool); ceph_msgpool_destroy(&osdc->msgpool_op); + ceph_msgpool_destroy(&osdc->msgpool_op_reply); } /* @@ -1405,16 +1342,29 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, if (!req) { *skip = 1; m = NULL; - pr_info("alloc_msg unknown tid %llu from osd%d\n", tid, + pr_info("get_reply unknown tid %llu from osd%d\n", tid, osd->o_osd); goto out; } - m = __get_next_reply(con, req, front); - if (!m || IS_ERR(m)) { - *skip = 1; - goto out; + + if (req->r_con_filling_msg) { + dout("get_reply revoking msg %p from old con %p\n", + req->r_reply, req->r_con_filling_msg); + ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply); + ceph_con_put(req->r_con_filling_msg); } + if (front > req->r_reply->front.iov_len) { + pr_warning("get_reply front %d > preallocated %d\n", + front, (int)req->r_reply->front.iov_len); + m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, 0, 0, NULL); + if (IS_ERR(m)) + goto out; + ceph_msg_put(req->r_reply); + req->r_reply = m; + } + m = ceph_msg_get(req->r_reply); + if (data_len > 0) { err = __prepare_pages(con, hdr, req, tid, m); if (err < 0) { @@ -1424,6 +1374,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, } } *skip = 0; + req->r_con_filling_msg = ceph_con_get(con); + dout("get_reply tid %lld %p\n", tid, m); out: mutex_unlock(&osdc->request_mutex); diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h index 70f31b61f02c..f256eba6fe7a 100644 --- a/fs/ceph/osd_client.h +++ b/fs/ceph/osd_client.h @@ -53,7 +53,6 @@ struct ceph_osd_request { int r_flags; /* any additional flags for the osd */ u32 r_sent; /* >0 if r_request is sending/sent */ int r_got_reply; - int r_num_prealloc_reply; struct ceph_osd_client *r_osdc; struct kref r_kref; @@ -77,9 +76,6 @@ struct ceph_osd_request { struct page **r_pages; /* pages for data payload */ int r_pages_from_pool; int r_own_pages; /* if true, i own page list */ - - struct ceph_msg *replies[2]; - int cur_reply; }; struct ceph_osd_client { @@ -106,6 +102,7 @@ struct ceph_osd_client { mempool_t *req_mempool; struct ceph_msgpool msgpool_op; + struct ceph_msgpool msgpool_op_reply; }; extern int ceph_osdc_init(struct ceph_osd_client *osdc, -- cgit v1.2.3 From 70edb55bdfa8922c8ad40bc5a67abb6d9fee8d47 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 1 Mar 2010 13:20:50 -0800 Subject: ceph: fix snaptrace decoding on cap migration between mds This was simply broken. 
Apparently at some point we thought about putting the snaptrace in the middle section, but didn't. Signed-off-by: Sage Weil --- fs/ceph/caps.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index bb846164addc..9afa8d37a6e3 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2608,6 +2608,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, u64 size, max_size; u64 tid; int check_caps = 0; + void *snaptrace; int r; dout("handle_caps from mds%d\n", mds); @@ -2617,6 +2618,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, if (msg->front.iov_len < sizeof(*h)) goto bad; h = msg->front.iov_base; + snaptrace = h + 1; op = le32_to_cpu(h->op); vino.ino = le64_to_cpu(h->ino); vino.snap = CEPH_NOSNAP; @@ -2651,8 +2653,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, case CEPH_CAP_OP_IMPORT: handle_cap_import(mdsc, inode, h, session, - msg->middle, - le32_to_cpu(h->snap_trace_len)); + snaptrace, le32_to_cpu(h->snap_trace_len)); check_caps = 1; /* we may have sent a RELEASE to the old auth */ goto done; } -- cgit v1.2.3 From 3ca02ef96e119d36bc1752baeae7dd0c59c2f325 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 1 Mar 2010 15:25:00 -0800 Subject: ceph: reset front len on return to msgpool; BUG on mismatched front iov Reset msg front len when a message is returned to the pool: the caller may have changed it. BUG if we try to send a message with a hdr.front_len that doesn't match the front iov. Signed-off-by: Sage Weil --- fs/ceph/messenger.c | 2 ++ fs/ceph/msgpool.c | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c index bf4590c77cf6..781656a49bf8 100644 --- a/fs/ceph/messenger.c +++ b/fs/ceph/messenger.c @@ -1954,6 +1954,8 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) msg->hdr.src.addr = con->msgr->my_enc_addr; msg->hdr.orig_src = msg->hdr.src; + BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len)); + /* queue */ mutex_lock(&con->mutex); BUG_ON(!list_empty(&msg->list_head)); diff --git a/fs/ceph/msgpool.c b/fs/ceph/msgpool.c index 2f04e0fc4666..ca3b44a89f2d 100644 --- a/fs/ceph/msgpool.c +++ b/fs/ceph/msgpool.c @@ -166,6 +166,10 @@ void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg) { spin_lock(&pool->lock); if (pool->num < pool->min) { + /* reset msg front_len; user may have changed it */ + msg->front.iov_len = pool->front_len; + msg->hdr.front_len = cpu_to_le32(pool->front_len); + kref_set(&msg->kref, 1); /* retake a single ref */ list_add(&msg->list_head, &pool->msgs); pool->num++; -- cgit v1.2.3 From 6f863e712d4114e8ae2f02de64ebeac0546ebaa0 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 1 Mar 2010 15:26:41 -0800 Subject: ceph: set osd request message front length correctly We didn't set the front length correctly. When messages used the message pool we ended up with the conservative max (4 KB), and the rest of the time the slightly less conservative estimate. Even though the OSD ignores the extra data, set it to the right value to avoid sending extra data over the network. 
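The fix amounts to measuring how much of the preallocated front buffer was actually encoded and advertising only that much; a rough standalone sketch (struct and field names invented, not the real ceph_msg layout):

#include <stdio.h>
#include <string.h>

struct toy_msg {
	char   front[4096];	/* conservatively sized buffer */
	size_t front_len;	/* what is actually sent on the wire */
};

static void finish_front(struct toy_msg *m, const char *end)
{
	/* trim the advertised length to the bytes actually encoded */
	m->front_len = (size_t)(end - m->front);
}

int main(void)
{
	struct toy_msg m = { .front_len = sizeof(m.front) };
	char *p = m.front;

	memcpy(p, "head", 4); p += 4;	/* pretend to encode a few fields */
	memcpy(p, "oid",  3); p += 3;
	finish_front(&m, p);
	printf("front_len %zu (buffer holds %zu)\n", m.front_len, sizeof(m.front));
	return 0;
}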
Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index ffe1f4064ccd..c4763bff97b4 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -228,6 +228,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, } BUG_ON(p > msg->front.iov_base + msg->front.iov_len); + msg_size = p - msg->front.iov_base; + msg->front.iov_len = msg_size; + msg->hdr.front_len = cpu_to_le32(msg_size); return req; } -- cgit v1.2.3 From 195d3ce2cc9a8ec69827f6369c41b269345b9988 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 1 Mar 2010 09:57:54 -0800 Subject: ceph: return EBADF if waiting for caps on closed file Verify the file is actually open for the given caps when we are waiting for caps. This ensures we will wake up and return EBADF if another thread closes the file out from under us. Note that EBADF is also the correct return code from write(2) when called on a file handle opened for reading (although the vfs should catch that). Signed-off-by: Sage Weil --- fs/ceph/caps.c | 9 ++++++--- fs/ceph/file.c | 3 +++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 9afa8d37a6e3..06f197983be6 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1923,14 +1923,17 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want, struct inode *inode = &ci->vfs_inode; int ret = 0; int have, implemented; + int file_wanted; dout("get_cap_refs %p need %s want %s\n", inode, ceph_cap_string(need), ceph_cap_string(want)); spin_lock(&inode->i_lock); - /* make sure we _have_ some caps! */ - if (!__ceph_is_any_caps(ci)) { - dout("get_cap_refs %p no real caps\n", inode); + /* make sure file is actually open */ + file_wanted = __ceph_caps_file_wanted(ci); + if ((file_wanted & need) == 0) { + dout("try_get_cap_refs need %s file_wanted %s, EBADF\n", + ceph_cap_string(need), ceph_cap_string(file_wanted)); *err = -EBADF; ret = 1; goto out; diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 88932c9145e9..5d2af8464f6a 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -262,6 +262,9 @@ int ceph_release(struct inode *inode, struct file *file) kfree(cf->dir_info); dput(cf->dentry); kmem_cache_free(ceph_file_cachep, cf); + + /* wake up anyone waiting for caps on this inode */ + wake_up(&ci->i_cap_wq); return 0; } -- cgit v1.2.3 From e53a8fd773065628b24605b289a9a40ee4a35d83 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 1 Mar 2010 14:50:05 -0800 Subject: ceph: fix osdmap decoding when pools include (removed) snaps Add missing pointer dereference (p is a void **). 
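The underlying pointer-to-pointer pitfall is easy to reproduce in isolation: when the decode cursor is passed as a void **, p += n only moves the local variable, while *p += n moves the cursor the caller sees. A minimal sketch (the kernel relies on GCC's void * arithmetic; the cast below just keeps the sketch portable):

#include <stddef.h>
#include <stdio.h>

static void skip_wrong(void **p, size_t n)
{
	p += n;			/* moves the local void ** only; the caller's
				   cursor never advances (the old bug) */
}

static void skip_right(void **p, size_t n)
{
	*p = (char *)*p + n;	/* advances the cursor the caller passed in */
}

int main(void)
{
	char buf[64];
	void *cursor = buf;

	skip_wrong(&cursor, 8);
	printf("after wrong skip: offset %td\n", (char *)cursor - buf);
	skip_right(&cursor, 8);
	printf("after right skip: offset %td\n", (char *)cursor - buf);
	return 0;
}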
Signed-off-by: Sage Weil --- fs/ceph/osdmap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c index 34b5696c84fd..b83f2692b835 100644 --- a/fs/ceph/osdmap.c +++ b/fs/ceph/osdmap.c @@ -529,8 +529,8 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) ceph_decode_copy(p, &pi->v, sizeof(pi->v)); __insert_pg_pool(&map->pg_pools, pi); calc_pg_masks(pi); - p += le32_to_cpu(pi->v.num_snaps) * sizeof(u64); - p += le32_to_cpu(pi->v.num_removed_snap_intervals) + *p += le32_to_cpu(pi->v.num_snaps) * sizeof(u64); + *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2; } ceph_decode_32_safe(p, end, map->pool_max, bad); -- cgit v1.2.3 From 7af8f1e4aa86720840d3318e4dc225c3c7e5a6d0 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 1 Mar 2010 15:17:34 -0800 Subject: ceph: include migrating caps in issued set We should include caps that are mid-migration (we've received the EXPORT, but not the IMPORT) in the issued caps set. Signed-off-by: Sage Weil --- fs/ceph/caps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 06f197983be6..295b7e547a31 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -655,7 +655,7 @@ static int __cap_is_valid(struct ceph_cap *cap) */ int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented) { - int have = ci->i_snap_caps; + int have = ci->i_snap_caps | ci->i_cap_exporting_issued; struct ceph_cap *cap; struct rb_node *p; -- cgit v1.2.3 From e9964c102312967a4bc1fd501cb628c4a3b19034 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 1 Mar 2010 15:16:56 -0800 Subject: ceph: fix flush_dirty_caps race with caps migration The flush_dirty_caps() used to loop over the first entry of the cap_dirty dirty list on the assumption that after calling ceph_check_caps() it would be removed from the list. This isn't true for caps that are being migrated between MDSs, where we've received the EXPORT but not the IMPORT. Instead, do a safe list iteration, and pin the next inode on the list via the CEPH_I_NOFLUSH flag. 
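Ignoring the igrab/CEPH_I_NOFLUSH pinning details, the safe-iteration idea can be shown with a plain singly linked list (the kernel uses list_for_each_safe() and list_head, not this toy structure):

#include <stdio.h>
#include <stdlib.h>

struct dirty_inode {
	int ino;
	struct dirty_inode *next;
};

/* flushing an entry may remove it from the list, so the successor is
 * sampled before the current entry is processed */
static void flush_dirty(struct dirty_inode **head)
{
	struct dirty_inode *cur = *head, *next;

	while (cur) {
		next = cur->next;	/* pin the next entry first */
		printf("flushing inode %d\n", cur->ino);
		free(cur);		/* cur may go away during the flush */
		cur = next;
	}
	*head = NULL;
}

int main(void)
{
	struct dirty_inode *head = NULL;

	for (int i = 3; i >= 1; i--) {
		struct dirty_inode *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->ino = i;
		n->next = head;
		head = n;
	}
	flush_dirty(&head);
	return 0;
}

The subtlety the patch handles, and the sketch does not, is that a migrating cap may leave the entry on the list; the real code therefore also pins the next inode with a reference and the NOFLUSH flag while the lock is dropped.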
Signed-off-by: Sage Weil --- fs/ceph/caps.c | 45 ++++++++++++++++++++++++++++++++++++++------- fs/ceph/super.h | 1 + 2 files changed, 39 insertions(+), 7 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 295b7e547a31..8b89b9123252 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1573,6 +1573,11 @@ retry_locked: } ack: + if (ci->i_ceph_flags & CEPH_I_NOFLUSH) { + dout(" skipping %p I_NOFLUSH set\n", inode); + continue; + } + if (session && session != cap->session) { dout("oops, wrong session %p mutex\n", session); mutex_unlock(&session->s_mutex); @@ -1652,6 +1657,10 @@ static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session, retry: spin_lock(&inode->i_lock); + if (ci->i_ceph_flags & CEPH_I_NOFLUSH) { + dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode); + goto out; + } if (ci->i_dirty_caps && ci->i_auth_cap) { struct ceph_cap *cap = ci->i_auth_cap; int used = __ceph_caps_used(ci); @@ -2747,16 +2756,38 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc) */ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc) { - struct ceph_inode_info *ci; - struct inode *inode; + struct ceph_inode_info *ci, *nci = NULL; + struct inode *inode, *ninode = NULL; + struct list_head *p, *n; dout("flush_dirty_caps\n"); spin_lock(&mdsc->cap_dirty_lock); - while (!list_empty(&mdsc->cap_dirty)) { - ci = list_first_entry(&mdsc->cap_dirty, - struct ceph_inode_info, - i_dirty_item); - inode = igrab(&ci->vfs_inode); + list_for_each_safe(p, n, &mdsc->cap_dirty) { + if (nci) { + ci = nci; + inode = ninode; + ci->i_ceph_flags &= ~CEPH_I_NOFLUSH; + dout("flush_dirty_caps inode %p (was next inode)\n", + inode); + } else { + ci = list_entry(p, struct ceph_inode_info, + i_dirty_item); + inode = igrab(&ci->vfs_inode); + BUG_ON(!inode); + dout("flush_dirty_caps inode %p\n", inode); + } + if (n != &mdsc->cap_dirty) { + nci = list_entry(n, struct ceph_inode_info, + i_dirty_item); + ninode = igrab(&nci->vfs_inode); + BUG_ON(!ninode); + nci->i_ceph_flags |= CEPH_I_NOFLUSH; + dout("flush_dirty_caps next inode %p, noflush\n", + ninode); + } else { + nci = NULL; + ninode = NULL; + } spin_unlock(&mdsc->cap_dirty_lock); if (inode) { ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, diff --git a/fs/ceph/super.h b/fs/ceph/super.h index ff7aaa32736c..6a778f2c3f6e 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -289,6 +289,7 @@ struct ceph_inode_xattrs_info { #define CEPH_I_COMPLETE 1 /* we have complete directory cached */ #define CEPH_I_NODELAY 4 /* do not delay cap release */ #define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */ +#define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */ struct ceph_inode_info { struct ceph_vino i_vino; /* ceph ino + snap */ -- cgit v1.2.3 From 6c71dcb28ff9b63b814a0b76a256f5dae08d3e0d Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Wed, 2 Dec 2009 14:28:48 -0600 Subject: [SCSI] scsi_dh_emc: fix mode select request setup This patch fixes the request setup code for mode selects. I got the fixes from Hannes Reinecke while trying to hunt down some problems and merged it into one patch. I am sending it because Hannes is busy with other things. The patch fixes: - setting of the length for mode selects. - setting of the data direction for mode select 10. 
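For reference, the length field lives in different CDB bytes for each of the three commands the handler issues, which is why a single cmd[4] = len assignment could not serve them all. A minimal sketch using the standard SCSI opcodes (the surrounding block-layer request setup is omitted):

#include <stdint.h>
#include <string.h>

#define MODE_SELECT	0x15	/* 6-byte CDB, data-out */
#define MODE_SELECT_10	0x55	/* 10-byte CDB, data-out */
#define INQUIRY		0x12	/* 6-byte CDB, data-in */

static void fill_cdb(uint8_t cdb[16], uint8_t opcode, uint16_t len)
{
	memset(cdb, 0, 16);
	cdb[0] = opcode;
	switch (opcode) {
	case MODE_SELECT:
		cdb[4] = (uint8_t)len;		/* parameter list length */
		break;
	case MODE_SELECT_10:
		cdb[7] = (uint8_t)(len >> 8);	/* parameter list length, MSB */
		cdb[8] = (uint8_t)len;		/* parameter list length, LSB */
		break;
	case INQUIRY:
		cdb[4] = (uint8_t)len;		/* allocation length */
		break;
	}
}

int main(void)
{
	uint8_t cdb[16];

	fill_cdb(cdb, MODE_SELECT_10, 24);
	return cdb[8] == 24 ? 0 : 1;
}

The data direction follows the same split: both mode selects move the trespass buffer to the device (a write), while INQUIRY reads from it, which is what the (cmd != INQUIRY) ? WRITE : READ test in the patch expresses.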
Signed-off-by: Hannes Reinecke Signed-off-by: Mike Christie Signed-off-by: James Bottomley --- drivers/scsi/device_handler/scsi_dh_emc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index 61966750bd60..63032ec3db92 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -272,7 +272,7 @@ static struct request *get_req(struct scsi_device *sdev, int cmd, int len = 0; rq = blk_get_request(sdev->request_queue, - (cmd == MODE_SELECT) ? WRITE : READ, GFP_NOIO); + (cmd != INQUIRY) ? WRITE : READ, GFP_NOIO); if (!rq) { sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed"); return NULL; @@ -286,14 +286,17 @@ static struct request *get_req(struct scsi_device *sdev, int cmd, len = sizeof(short_trespass); rq->cmd_flags |= REQ_RW; rq->cmd[1] = 0x10; + rq->cmd[4] = len; break; case MODE_SELECT_10: len = sizeof(long_trespass); rq->cmd_flags |= REQ_RW; rq->cmd[1] = 0x10; + rq->cmd[8] = len; break; case INQUIRY: len = CLARIION_BUFFER_SIZE; + rq->cmd[4] = len; memset(buffer, 0, len); break; default: @@ -301,7 +304,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd, break; } - rq->cmd[4] = len; rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; -- cgit v1.2.3 From a32c055feed74246747bf4f45adb765136d3a4d3 Mon Sep 17 00:00:00 2001 From: Wayne Boyer Date: Fri, 19 Feb 2010 13:23:36 -0800 Subject: [SCSI] ipr: add support for new adapter command structures for the next generation chip Change the adapter command structures such that both 32 bit and 64 bit based adapters can work with the driver. Signed-off-by: Wayne Boyer Signed-off-by: James Bottomley --- drivers/scsi/ipr.c | 450 +++++++++++++++++++++++++++++++++++++++-------------- drivers/scsi/ipr.h | 61 ++++++-- 2 files changed, 385 insertions(+), 126 deletions(-) diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 032f0d0e6cb4..359882eadc26 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -131,13 +131,13 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { }; static const struct ipr_chip_t ipr_chip[] = { - { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, &ipr_chip_cfg[0] }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, &ipr_chip_cfg[0] }, - { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, &ipr_chip_cfg[0] }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, &ipr_chip_cfg[1] }, - { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, &ipr_chip_cfg[1] } + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }, + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] } }; static int 
ipr_max_bus_speeds [] = { @@ -468,7 +468,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, trace_entry->time = jiffies; trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; trace_entry->type = type; - trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command; + if (ipr_cmd->ioa_cfg->sis64) + trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command; + else + trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command; trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff; trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; trace_entry->u.add_data = add_data; @@ -488,16 +491,23 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; - dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr); + dma_addr_t dma_addr = ipr_cmd->dma_addr; memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); - ioarcb->write_data_transfer_length = 0; + ioarcb->data_transfer_length = 0; ioarcb->read_data_transfer_length = 0; - ioarcb->write_ioadl_len = 0; + ioarcb->ioadl_len = 0; ioarcb->read_ioadl_len = 0; - ioarcb->write_ioadl_addr = - cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); - ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; + + if (ipr_cmd->ioa_cfg->sis64) + ioarcb->u.sis64_addr_data.data_ioadl_addr = + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); + else { + ioarcb->write_ioadl_addr = + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); + ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; + } + ioasa->ioasc = 0; ioasa->residual_data_len = 0; ioasa->u.gata.status = 0; @@ -692,6 +702,35 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) LEAVE; } +/** + * ipr_send_command - Send driver initiated requests. + * @ipr_cmd: ipr command struct + * + * This function sends a command to the adapter using the correct write call. + * In the case of sis64, calculate the ioarcb size required. Then or in the + * appropriate bits. + * + * Return value: + * none + **/ +static void ipr_send_command(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + dma_addr_t send_dma_addr = ipr_cmd->dma_addr; + + if (ioa_cfg->sis64) { + /* The default size is 256 bytes */ + send_dma_addr |= 0x1; + + /* If the number of ioadls * size of ioadl > 128 bytes, + then use a 512 byte ioarcb */ + if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 ) + send_dma_addr |= 0x4; + writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg); + } else + writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg); +} + /** * ipr_do_req - Send driver initiated requests. * @ipr_cmd: ipr command struct @@ -724,8 +763,8 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd, ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0); mb(); - writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), - ioa_cfg->regs.ioarrin_reg); + + ipr_send_command(ipr_cmd); } /** @@ -746,6 +785,51 @@ static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd) complete(&ipr_cmd->completion); } +/** + * ipr_init_ioadl - initialize the ioadl for the correct SIS type + * @ipr_cmd: ipr command struct + * @dma_addr: dma address + * @len: transfer length + * @flags: ioadl flag value + * + * This function initializes an ioadl in the case where there is only a single + * descriptor. 
+ * + * Return value: + * nothing + **/ +static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr, + u32 len, int flags) +{ + struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; + + ipr_cmd->dma_use_sg = 1; + + if (ipr_cmd->ioa_cfg->sis64) { + ioadl64->flags = cpu_to_be32(flags); + ioadl64->data_len = cpu_to_be32(len); + ioadl64->address = cpu_to_be64(dma_addr); + + ipr_cmd->ioarcb.ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl64_desc)); + ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); + } else { + ioadl->flags_and_data_len = cpu_to_be32(flags | len); + ioadl->address = cpu_to_be32(dma_addr); + + if (flags == IPR_IOADL_FLAGS_READ_LAST) { + ipr_cmd->ioarcb.read_ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len); + } else { + ipr_cmd->ioarcb.ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len); + } + } +} + /** * ipr_send_blocking_cmd - Send command and sleep on its completion. * @ipr_cmd: ipr command struct @@ -803,11 +887,8 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff; ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff; - ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam)); - ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); - ipr_cmd->ioadl[0].flags_and_data_len = - cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam)); - ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma); + ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma, + sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST); if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE) ipr_cmd->done = ipr_process_ccn; @@ -817,8 +898,8 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR); mb(); - writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), - ioa_cfg->regs.ioarrin_reg); + + ipr_send_command(ipr_cmd); } else { list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); } @@ -2975,6 +3056,37 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist, return result; } +/** + * ipr_build_ucode_ioadl64 - Build a microcode download IOADL + * @ipr_cmd: ipr command struct + * @sglist: scatter/gather list + * + * Builds a microcode download IOA data list (IOADL). 
+ * + **/ +static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd, + struct ipr_sglist *sglist) +{ + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; + struct scatterlist *scatterlist = sglist->scatterlist; + int i; + + ipr_cmd->dma_use_sg = sglist->num_dma_sg; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); + + ioarcb->ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); + for (i = 0; i < ipr_cmd->dma_use_sg; i++) { + ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE); + ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i])); + ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i])); + } + + ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); +} + /** * ipr_build_ucode_ioadl - Build a microcode download IOADL * @ipr_cmd: ipr command struct @@ -2987,14 +3099,15 @@ static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd, struct ipr_sglist *sglist) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; struct scatterlist *scatterlist = sglist->scatterlist; int i; ipr_cmd->dma_use_sg = sglist->num_dma_sg; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; - ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len); - ioarcb->write_ioadl_len = + ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len); + + ioarcb->ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); for (i = 0; i < ipr_cmd->dma_use_sg; i++) { @@ -3828,14 +3941,19 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); ioarcb = &ipr_cmd->ioarcb; cmd_pkt = &ioarcb->cmd_pkt; - regs = &ioarcb->add_data.u.regs; + + if (ipr_cmd->ioa_cfg->sis64) { + regs = &ipr_cmd->i.ata_ioadl.regs; + ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); + } else + regs = &ioarcb->u.add_data.u.regs; ioarcb->res_handle = res->cfgte.res_handle; cmd_pkt->request_type = IPR_RQTYPE_IOACMD; cmd_pkt->cdb[0] = IPR_RESET_DEVICE; if (ipr_is_gata(res)) { cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET; - ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags)); + ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags)); regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; } @@ -4308,6 +4426,53 @@ static irqreturn_t ipr_isr(int irq, void *devp) return rc; } +/** + * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer + * @ioa_cfg: ioa config struct + * @ipr_cmd: ipr command struct + * + * Return value: + * 0 on success / -1 on failure + **/ +static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_cmnd *ipr_cmd) +{ + int i, nseg; + struct scatterlist *sg; + u32 length; + u32 ioadl_flags = 0; + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; + + length = scsi_bufflen(scsi_cmd); + if (!length) + return 0; + + nseg = scsi_dma_map(scsi_cmd); + if (nseg < 0) { + dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); + return -1; + } + + ipr_cmd->dma_use_sg = nseg; + + if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { + ioadl_flags = IPR_IOADL_FLAGS_WRITE; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) + ioadl_flags = IPR_IOADL_FLAGS_READ; + + scsi_for_each_sg(scsi_cmd, sg, 
ipr_cmd->dma_use_sg, i) { + ioadl64[i].flags = cpu_to_be32(ioadl_flags); + ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg)); + ioadl64[i].address = cpu_to_be64(sg_dma_address(sg)); + } + + ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); + return 0; +} + /** * ipr_build_ioadl - Build a scatter/gather list and map the buffer * @ioa_cfg: ioa config struct @@ -4325,7 +4490,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, u32 ioadl_flags = 0; struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; length = scsi_bufflen(scsi_cmd); if (!length) @@ -4342,8 +4507,8 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { ioadl_flags = IPR_IOADL_FLAGS_WRITE; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; - ioarcb->write_data_transfer_length = cpu_to_be32(length); - ioarcb->write_ioadl_len = + ioarcb->data_transfer_length = cpu_to_be32(length); + ioarcb->ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { ioadl_flags = IPR_IOADL_FLAGS_READ; @@ -4352,11 +4517,10 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); } - if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) { - ioadl = ioarcb->add_data.u.ioadl; - ioarcb->write_ioadl_addr = - cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) + - offsetof(struct ipr_ioarcb, add_data)); + if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) { + ioadl = ioarcb->u.add_data.u.ioadl; + ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) + + offsetof(struct ipr_ioarcb, u.add_data)); ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; } @@ -4446,18 +4610,24 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; - dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr); + dma_addr_t dma_addr = ipr_cmd->dma_addr; memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); - ioarcb->write_data_transfer_length = 0; + ioarcb->data_transfer_length = 0; ioarcb->read_data_transfer_length = 0; - ioarcb->write_ioadl_len = 0; + ioarcb->ioadl_len = 0; ioarcb->read_ioadl_len = 0; ioasa->ioasc = 0; ioasa->residual_data_len = 0; - ioarcb->write_ioadl_addr = - cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); - ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; + + if (ipr_cmd->ioa_cfg->sis64) + ioarcb->u.sis64_addr_data.data_ioadl_addr = + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); + else { + ioarcb->write_ioadl_addr = + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); + ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; + } } /** @@ -4489,15 +4659,8 @@ static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); - ipr_cmd->ioadl[0].flags_and_data_len = - cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE); - ipr_cmd->ioadl[0].address = - cpu_to_be32(ipr_cmd->sense_buffer_dma); - - ipr_cmd->ioarcb.read_ioadl_len = - cpu_to_be32(sizeof(struct ipr_ioadl_desc)); - ipr_cmd->ioarcb.read_data_transfer_length = - cpu_to_be32(SCSI_SENSE_BUFFERSIZE); + ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma, + 
SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST); ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout, IPR_REQUEST_SENSE_TIMEOUT * 2); @@ -4916,13 +5079,16 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; - if (likely(rc == 0)) - rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); + if (likely(rc == 0)) { + if (ioa_cfg->sis64) + rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd); + else + rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); + } if (likely(rc == 0)) { mb(); - writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), - ioa_cfg->regs.ioarrin_reg); + ipr_send_command(ipr_cmd); } else { list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q); return SCSI_MLQUEUE_HOST_BUSY; @@ -5145,6 +5311,52 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd) ata_qc_complete(qc); } +/** + * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list + * @ipr_cmd: ipr command struct + * @qc: ATA queued command + * + **/ +static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, + struct ata_queued_cmd *qc) +{ + u32 ioadl_flags = 0; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; + struct ipr_ioadl64_desc *last_ioadl64 = NULL; + int len = qc->nbytes; + struct scatterlist *sg; + unsigned int si; + dma_addr_t dma_addr = ipr_cmd->dma_addr; + + if (len == 0) + return; + + if (qc->dma_dir == DMA_TO_DEVICE) { + ioadl_flags = IPR_IOADL_FLAGS_WRITE; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + } else if (qc->dma_dir == DMA_FROM_DEVICE) + ioadl_flags = IPR_IOADL_FLAGS_READ; + + ioarcb->data_transfer_length = cpu_to_be32(len); + ioarcb->ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); + ioarcb->u.sis64_addr_data.data_ioadl_addr = + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl)); + + for_each_sg(qc->sg, sg, qc->n_elem, si) { + ioadl64->flags = cpu_to_be32(ioadl_flags); + ioadl64->data_len = cpu_to_be32(sg_dma_len(sg)); + ioadl64->address = cpu_to_be64(sg_dma_address(sg)); + + last_ioadl64 = ioadl64; + ioadl64++; + } + + if (likely(last_ioadl64)) + last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST); +} + /** * ipr_build_ata_ioadl - Build an ATA scatter/gather list * @ipr_cmd: ipr command struct @@ -5156,7 +5368,7 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd, { u32 ioadl_flags = 0; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl; struct ipr_ioadl_desc *last_ioadl = NULL; int len = qc->nbytes; struct scatterlist *sg; @@ -5168,8 +5380,8 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd, if (qc->dma_dir == DMA_TO_DEVICE) { ioadl_flags = IPR_IOADL_FLAGS_WRITE; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; - ioarcb->write_data_transfer_length = cpu_to_be32(len); - ioarcb->write_ioadl_len = + ioarcb->data_transfer_length = cpu_to_be32(len); + ioarcb->ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); } else if (qc->dma_dir == DMA_FROM_DEVICE) { ioadl_flags = IPR_IOADL_FLAGS_READ; @@ -5212,10 +5424,15 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); ioarcb = &ipr_cmd->ioarcb; - regs = &ioarcb->add_data.u.regs; - memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data)); - ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs)); + if (ioa_cfg->sis64) { + regs = 
&ipr_cmd->i.ata_ioadl.regs; + ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb)); + } else + regs = &ioarcb->u.add_data.u.regs; + + memset(regs, 0, sizeof(*regs)); + ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs)); list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); ipr_cmd->qc = qc; @@ -5226,7 +5443,11 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; ipr_cmd->dma_use_sg = qc->n_elem; - ipr_build_ata_ioadl(ipr_cmd, qc); + if (ioa_cfg->sis64) + ipr_build_ata_ioadl64(ipr_cmd, qc); + else + ipr_build_ata_ioadl(ipr_cmd, qc); + regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; ipr_copy_sata_tf(regs, &qc->tf); memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); @@ -5257,8 +5478,9 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) } mb(); - writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr), - ioa_cfg->regs.ioarrin_reg); + + ipr_send_command(ipr_cmd); + return 0; } @@ -5459,7 +5681,7 @@ static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev, * ipr_set_supported_devs - Send Set Supported Devices for a device * @ipr_cmd: ipr command struct * - * This function send a Set Supported Devices to the adapter + * This function sends a Set Supported Devices to the adapter * * Return value: * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN @@ -5468,7 +5690,6 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; struct ipr_resource_entry *res = ipr_cmd->u.res; @@ -5489,13 +5710,11 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; - ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | - sizeof(struct ipr_supported_device)); - ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma + - offsetof(struct ipr_misc_cbs, supp_dev)); - ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); - ioarcb->write_data_transfer_length = - cpu_to_be32(sizeof(struct ipr_supported_device)); + ipr_init_ioadl(ipr_cmd, + ioa_cfg->vpd_cbs_dma + + offsetof(struct ipr_misc_cbs, supp_dev), + sizeof(struct ipr_supported_device), + IPR_IOADL_FLAGS_WRITE_LAST); ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_SET_SUP_DEVICE_TIMEOUT); @@ -5695,10 +5914,9 @@ static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg, * none **/ static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, - __be32 res_handle, u8 parm, u32 dma_addr, - u8 xfer_len) + __be32 res_handle, u8 parm, + dma_addr_t dma_addr, u8 xfer_len) { - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; ioarcb->res_handle = res_handle; @@ -5708,11 +5926,7 @@ static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, ioarcb->cmd_pkt.cdb[1] = parm; ioarcb->cmd_pkt.cdb[4] = xfer_len; - ioadl->flags_and_data_len = - cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len); - ioadl->address = cpu_to_be32(dma_addr); - ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); - ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len); + ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST); } /** @@ -5762,9 +5976,8 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd) **/ 
static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, __be32 res_handle, - u8 parm, u32 dma_addr, u8 xfer_len) + u8 parm, dma_addr_t dma_addr, u8 xfer_len) { - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; ioarcb->res_handle = res_handle; @@ -5773,11 +5986,7 @@ static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, ioarcb->cmd_pkt.cdb[4] = xfer_len; ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; - ioadl->flags_and_data_len = - cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len); - ioadl->address = cpu_to_be32(dma_addr); - ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); - ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len); + ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); } /** @@ -6033,7 +6242,6 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap; @@ -6050,13 +6258,9 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff; ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff; - ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); - ioarcb->read_data_transfer_length = - cpu_to_be32(sizeof(struct ipr_config_table)); - - ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma); - ioadl->flags_and_data_len = - cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table)); + ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, + sizeof(struct ipr_config_table), + IPR_IOADL_FLAGS_READ_LAST); ipr_cmd->job_step = ipr_init_res_table; @@ -6076,10 +6280,9 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) * none **/ static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, - u32 dma_addr, u8 xfer_len) + dma_addr_t dma_addr, u8 xfer_len) { struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; - struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; ENTER; ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; @@ -6090,12 +6293,7 @@ static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, ioarcb->cmd_pkt.cdb[2] = page; ioarcb->cmd_pkt.cdb[4] = xfer_len; - ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); - ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len); - - ioadl->address = cpu_to_be32(dma_addr); - ioadl->flags_and_data_len = - cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len); + ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST); ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); LEAVE; @@ -6785,7 +6983,10 @@ static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd) ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; - ipr_build_ucode_ioadl(ipr_cmd, sglist); + if (ioa_cfg->sis64) + ipr_build_ucode_ioadl64(ipr_cmd, sglist); + else + ipr_build_ucode_ioadl(ipr_cmd, sglist); ipr_cmd->job_step = ipr_reset_ucode_download_done; ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, @@ -7209,7 +7410,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) int i; ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev, - sizeof(struct ipr_cmnd), 8, 0); + sizeof(struct ipr_cmnd), 16, 0); if (!ioa_cfg->ipr_cmd_pool) 
return -ENOMEM; @@ -7227,13 +7428,25 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; ioarcb = &ipr_cmd->ioarcb; - ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr); + ipr_cmd->dma_addr = dma_addr; + if (ioa_cfg->sis64) + ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr); + else + ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr); + ioarcb->host_response_handle = cpu_to_be32(i << 2); - ioarcb->write_ioadl_addr = - cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); - ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; - ioarcb->ioasa_host_pci_addr = - cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); + if (ioa_cfg->sis64) { + ioarcb->u.sis64_addr_data.data_ioadl_addr = + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64)); + ioarcb->u.sis64_addr_data.ioasa_host_pci_addr = + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa)); + } else { + ioarcb->write_ioadl_addr = + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl)); + ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; + ioarcb->ioasa_host_pci_addr = + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); + } ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); ipr_cmd->cmd_index = i; ipr_cmd->ioa_cfg = ioa_cfg; @@ -7578,6 +7791,8 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, goto out_scsi_host_put; } + /* set SIS 32 or SIS 64 */ + ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0; ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg; if (ipr_transop_timeout) @@ -7615,7 +7830,16 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, pci_set_master(pdev); - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ioa_cfg->sis64) { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc < 0) { + dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n"); + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + } else + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc < 0) { dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); goto cleanup_nomem; diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 19bbcf39f0c9..64e41df2a196 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -381,7 +381,7 @@ struct ipr_cmd_pkt { #define IPR_RQTYPE_HCAM 0x02 #define IPR_RQTYPE_ATA_PASSTHRU 0x04 - u8 luntar_luntrn; + u8 reserved2; u8 flags_hi; #define IPR_FLAGS_HI_WRITE_NOT_READ 0x80 @@ -403,7 +403,7 @@ struct ipr_cmd_pkt { __be16 timeout; }__attribute__ ((packed, aligned(4))); -struct ipr_ioarcb_ata_regs { +struct ipr_ioarcb_ata_regs { /* 22 bytes */ u8 flags; #define IPR_ATA_FLAG_PACKET_CMD 0x80 #define IPR_ATA_FLAG_XFER_TYPE_DMA 0x40 @@ -442,28 +442,49 @@ struct ipr_ioadl_desc { __be32 address; }__attribute__((packed, aligned (8))); +struct ipr_ioadl64_desc { + __be32 flags; + __be32 data_len; + __be64 address; +}__attribute__((packed, aligned (16))); + +struct ipr_ata64_ioadl { + struct ipr_ioarcb_ata_regs regs; + u16 reserved[5]; + struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; +}__attribute__((packed, aligned (16))); + struct ipr_ioarcb_add_data { union { struct ipr_ioarcb_ata_regs regs; struct ipr_ioadl_desc ioadl[5]; __be32 add_cmd_parms[10]; - }u; -}__attribute__ ((packed, aligned(4))); + } u; +}__attribute__ ((packed, aligned (4))); + +struct ipr_ioarcb_sis64_add_addr_ecb { + __be64 ioasa_host_pci_addr; + __be64 data_ioadl_addr; + __be64 reserved; + __be32 ext_control_buf[4]; +}__attribute__((packed, aligned (8))); /* IOA Request Control Block 128 bytes */ struct 
ipr_ioarcb { - __be32 ioarcb_host_pci_addr; - __be32 reserved; + union { + __be32 ioarcb_host_pci_addr; + __be64 ioarcb_host_pci_addr64; + } a; __be32 res_handle; __be32 host_response_handle; __be32 reserved1; __be32 reserved2; __be32 reserved3; - __be32 write_data_transfer_length; + __be32 data_transfer_length; __be32 read_data_transfer_length; __be32 write_ioadl_addr; - __be32 write_ioadl_len; + __be32 ioadl_len; __be32 read_ioadl_addr; __be32 read_ioadl_len; @@ -473,8 +494,14 @@ struct ipr_ioarcb { struct ipr_cmd_pkt cmd_pkt; - __be32 add_cmd_parms_len; - struct ipr_ioarcb_add_data add_data; + __be16 add_cmd_parms_offset; + __be16 add_cmd_parms_len; + + union { + struct ipr_ioarcb_add_data add_data; + struct ipr_ioarcb_sis64_add_addr_ecb sis64_addr_data; + } u; + }__attribute__((packed, aligned (4))); struct ipr_ioasa_vset { @@ -1029,6 +1056,9 @@ struct ipr_chip_t { u16 intr_type; #define IPR_USE_LSI 0x00 #define IPR_USE_MSI 0x01 + u16 sis_type; +#define IPR_SIS32 0x00 +#define IPR_SIS64 0x01 const struct ipr_chip_cfg_t *cfg; }; @@ -1099,6 +1129,7 @@ struct ipr_ioa_cfg { u8 dual_raid:1; u8 needs_warm_reset:1; u8 msi_received:1; + u8 sis64:1; u8 revid; @@ -1202,13 +1233,17 @@ struct ipr_ioa_cfg { char ipr_cmd_label[8]; #define IPR_CMD_LABEL "ipr_cmd" struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS]; - u32 ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; + dma_addr_t ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; }; struct ipr_cmnd { struct ipr_ioarcb ioarcb; + union { + struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES]; + struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES]; + struct ipr_ata64_ioadl ata_ioadl; + } i; struct ipr_ioasa ioasa; - struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES]; struct list_head queue; struct scsi_cmnd *scsi_cmd; struct ata_queued_cmd *qc; @@ -1221,7 +1256,7 @@ struct ipr_cmnd { u8 sense_buffer[SCSI_SENSE_BUFFERSIZE]; dma_addr_t sense_buffer_dma; unsigned short dma_use_sg; - dma_addr_t dma_handle; + dma_addr_t dma_addr; struct ipr_cmnd *sibling; union { enum ipr_shutdown_type shutdown_type; -- cgit v1.2.3 From a74c16390a47dcb6c96b20b572ffc9936073d4b1 Mon Sep 17 00:00:00 2001 From: Wayne Boyer Date: Fri, 19 Feb 2010 13:23:51 -0800 Subject: [SCSI] ipr: define new offsets to registers for the next generation chip This patch adds the entry to the ipr_chip_cfg array that defines the register offsets for the next generation 64 bit IOA PCI interface chip. 
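As a rough sketch of how these per-chip offsets are consumed (names simplified and not taken verbatim from the driver; pdev, chip_cfg and ioarcb_dma are assumed to come from the probe path), the adapter's MMIO region is mapped once and each register address is derived from the matched ipr_chip_cfg entry, so only the offset table differs between the existing chips and the new 64 bit CRoC interface:

	/* minimal sketch, assuming BAR 0 is the register space and chip_cfg
	 * points at the CRoC entry added below */
	void __iomem *base    = pci_ioremap_bar(pdev, 0);
	void __iomem *mailbox = base + chip_cfg->mailbox;            /* 0x00040 on CRoC */
	void __iomem *ioarrin = base + chip_cfg->regs.ioarrin_reg;   /* 0x00070 on CRoC */

	/* e.g. hand the IOA a command block by writing its DMA address to IOARRIN */
	writel(lower_32_bits(ioarcb_dma), ioarrin);
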
Signed-off-by: Wayne Boyer Signed-off-by: James Bottomley --- drivers/scsi/ipr.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 359882eadc26..e6bab3fb6945 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -128,6 +128,21 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { .clr_uproc_interrupt_reg = 0x00294 } }, + { /* CRoC */ + .mailbox = 0x00040, + .cache_line_size = 0x20, + { + .set_interrupt_mask_reg = 0x00010, + .clr_interrupt_mask_reg = 0x00018, + .sense_interrupt_mask_reg = 0x00010, + .clr_interrupt_reg = 0x00008, + .sense_interrupt_reg = 0x00000, + .ioarrin_reg = 0x00070, + .sense_uproc_interrupt_reg = 0x00020, + .set_uproc_interrupt_reg = 0x00020, + .clr_uproc_interrupt_reg = 0x00028 + } + }, }; static const struct ipr_chip_t ipr_chip[] = { -- cgit v1.2.3 From 3e7ebdfa58ddaef361f9538219e66a7226fb1e5d Mon Sep 17 00:00:00 2001 From: Wayne Boyer Date: Fri, 19 Feb 2010 13:23:59 -0800 Subject: [SCSI] ipr: update the configuration table code for the next generation chip This patch changes the configuration table structures and related code such that both 32 bit and 64 bit based adapters can work with the driver. This patch also implements the code to generate the virtual bus/id/lun values for devices connected to the new adapters. It also implements support for the new device resource path. Signed-off-by: Wayne Boyer Signed-off-by: James Bottomley --- drivers/scsi/ipr.c | 512 +++++++++++++++++++++++++++++++++++++++++++---------- drivers/scsi/ipr.h | 174 ++++++++++++------ 2 files changed, 538 insertions(+), 148 deletions(-) diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index e6bab3fb6945..91e330a12721 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -72,6 +72,7 @@ #include #include #include +#include #include #include #include @@ -93,6 +94,7 @@ static unsigned int ipr_fastfail = 0; static unsigned int ipr_transop_timeout = 0; static unsigned int ipr_enable_cache = 1; static unsigned int ipr_debug = 0; +static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS; static unsigned int ipr_dual_ioa_raid = 1; static DEFINE_SPINLOCK(ipr_driver_lock); @@ -177,6 +179,9 @@ module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)"); module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0); MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)"); +module_param_named(max_devs, ipr_max_devs, int, 0); +MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. " + "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]"); MODULE_LICENSE("GPL"); MODULE_VERSION(IPR_DRIVER_VERSION); @@ -920,15 +925,47 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, } } +/** + * ipr_update_ata_class - Update the ata class in the resource entry + * @res: resource entry struct + * @proto: cfgte device bus protocol value + * + * Return value: + * none + **/ +static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto) +{ + switch(proto) { + case IPR_PROTO_SATA: + case IPR_PROTO_SAS_STP: + res->ata_class = ATA_DEV_ATA; + break; + case IPR_PROTO_SATA_ATAPI: + case IPR_PROTO_SAS_STP_ATAPI: + res->ata_class = ATA_DEV_ATAPI; + break; + default: + res->ata_class = ATA_DEV_UNKNOWN; + break; + }; +} + /** * ipr_init_res_entry - Initialize a resource entry struct. 
* @res: resource entry struct + * @cfgtew: config table entry wrapper struct * * Return value: * none **/ -static void ipr_init_res_entry(struct ipr_resource_entry *res) +static void ipr_init_res_entry(struct ipr_resource_entry *res, + struct ipr_config_table_entry_wrapper *cfgtew) { + int found = 0; + unsigned int proto; + struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; + struct ipr_resource_entry *gscsi_res = NULL; + res->needs_sync_complete = 0; res->in_erp = 0; res->add_to_ml = 0; @@ -936,6 +973,205 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res) res->resetting_device = 0; res->sdev = NULL; res->sata_port = NULL; + + if (ioa_cfg->sis64) { + proto = cfgtew->u.cfgte64->proto; + res->res_flags = cfgtew->u.cfgte64->res_flags; + res->qmodel = IPR_QUEUEING_MODEL64(res); + res->type = cfgtew->u.cfgte64->res_type & 0x0f; + + memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, + sizeof(res->res_path)); + + res->bus = 0; + res->lun = scsilun_to_int(&res->dev_lun); + + if (res->type == IPR_RES_TYPE_GENERIC_SCSI) { + list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) { + if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) { + found = 1; + res->target = gscsi_res->target; + break; + } + } + if (!found) { + res->target = find_first_zero_bit(ioa_cfg->target_ids, + ioa_cfg->max_devs_supported); + set_bit(res->target, ioa_cfg->target_ids); + } + + memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, + sizeof(res->dev_lun.scsi_lun)); + } else if (res->type == IPR_RES_TYPE_IOAFP) { + res->bus = IPR_IOAFP_VIRTUAL_BUS; + res->target = 0; + } else if (res->type == IPR_RES_TYPE_ARRAY) { + res->bus = IPR_ARRAY_VIRTUAL_BUS; + res->target = find_first_zero_bit(ioa_cfg->array_ids, + ioa_cfg->max_devs_supported); + set_bit(res->target, ioa_cfg->array_ids); + } else if (res->type == IPR_RES_TYPE_VOLUME_SET) { + res->bus = IPR_VSET_VIRTUAL_BUS; + res->target = find_first_zero_bit(ioa_cfg->vset_ids, + ioa_cfg->max_devs_supported); + set_bit(res->target, ioa_cfg->vset_ids); + } else { + res->target = find_first_zero_bit(ioa_cfg->target_ids, + ioa_cfg->max_devs_supported); + set_bit(res->target, ioa_cfg->target_ids); + } + } else { + proto = cfgtew->u.cfgte->proto; + res->qmodel = IPR_QUEUEING_MODEL(res); + res->flags = cfgtew->u.cfgte->flags; + if (res->flags & IPR_IS_IOA_RESOURCE) + res->type = IPR_RES_TYPE_IOAFP; + else + res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; + + res->bus = cfgtew->u.cfgte->res_addr.bus; + res->target = cfgtew->u.cfgte->res_addr.target; + res->lun = cfgtew->u.cfgte->res_addr.lun; + } + + ipr_update_ata_class(res, proto); +} + +/** + * ipr_is_same_device - Determine if two devices are the same. + * @res: resource entry struct + * @cfgtew: config table entry wrapper struct + * + * Return value: + * 1 if the devices are the same / 0 otherwise + **/ +static int ipr_is_same_device(struct ipr_resource_entry *res, + struct ipr_config_table_entry_wrapper *cfgtew) +{ + if (res->ioa_cfg->sis64) { + if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id, + sizeof(cfgtew->u.cfgte64->dev_id)) && + !memcmp(&res->lun, &cfgtew->u.cfgte64->lun, + sizeof(cfgtew->u.cfgte64->lun))) { + return 1; + } + } else { + if (res->bus == cfgtew->u.cfgte->res_addr.bus && + res->target == cfgtew->u.cfgte->res_addr.target && + res->lun == cfgtew->u.cfgte->res_addr.lun) + return 1; + } + + return 0; +} + +/** + * ipr_format_resource_path - Format the resource path for printing. 
+ * @res_path: resource path + * @buf: buffer + * + * Return value: + * pointer to buffer + **/ +static char *ipr_format_resource_path(u8 *res_path, char *buffer) +{ + int i; + + sprintf(buffer, "%02X", res_path[0]); + for (i=1; res_path[i] != 0xff; i++) + sprintf(buffer, "%s:%02X", buffer, res_path[i]); + + return buffer; +} + +/** + * ipr_update_res_entry - Update the resource entry. + * @res: resource entry struct + * @cfgtew: config table entry wrapper struct + * + * Return value: + * none + **/ +static void ipr_update_res_entry(struct ipr_resource_entry *res, + struct ipr_config_table_entry_wrapper *cfgtew) +{ + char buffer[IPR_MAX_RES_PATH_LENGTH]; + unsigned int proto; + int new_path = 0; + + if (res->ioa_cfg->sis64) { + res->flags = cfgtew->u.cfgte64->flags; + res->res_flags = cfgtew->u.cfgte64->res_flags; + res->type = cfgtew->u.cfgte64->res_type & 0x0f; + + memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data, + sizeof(struct ipr_std_inq_data)); + + res->qmodel = IPR_QUEUEING_MODEL64(res); + proto = cfgtew->u.cfgte64->proto; + res->res_handle = cfgtew->u.cfgte64->res_handle; + res->dev_id = cfgtew->u.cfgte64->dev_id; + + memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, + sizeof(res->dev_lun.scsi_lun)); + + if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path, + sizeof(res->res_path))) { + memcpy(res->res_path, &cfgtew->u.cfgte64->res_path, + sizeof(res->res_path)); + new_path = 1; + } + + if (res->sdev && new_path) + sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n", + ipr_format_resource_path(&res->res_path[0], &buffer[0])); + } else { + res->flags = cfgtew->u.cfgte->flags; + if (res->flags & IPR_IS_IOA_RESOURCE) + res->type = IPR_RES_TYPE_IOAFP; + else + res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f; + + memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data, + sizeof(struct ipr_std_inq_data)); + + res->qmodel = IPR_QUEUEING_MODEL(res); + proto = cfgtew->u.cfgte->proto; + res->res_handle = cfgtew->u.cfgte->res_handle; + } + + ipr_update_ata_class(res, proto); +} + +/** + * ipr_clear_res_target - Clear the bit in the bit map representing the target + * for the resource. 
+ * @res: resource entry struct + * @cfgtew: config table entry wrapper struct + * + * Return value: + * none + **/ +static void ipr_clear_res_target(struct ipr_resource_entry *res) +{ + struct ipr_resource_entry *gscsi_res = NULL; + struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg; + + if (!ioa_cfg->sis64) + return; + + if (res->bus == IPR_ARRAY_VIRTUAL_BUS) + clear_bit(res->target, ioa_cfg->array_ids); + else if (res->bus == IPR_VSET_VIRTUAL_BUS) + clear_bit(res->target, ioa_cfg->vset_ids); + else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) { + list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) + if (gscsi_res->dev_id == res->dev_id && gscsi_res != res) + return; + clear_bit(res->target, ioa_cfg->target_ids); + + } else if (res->bus == 0) + clear_bit(res->target, ioa_cfg->target_ids); } /** @@ -947,17 +1183,24 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res) * none **/ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, - struct ipr_hostrcb *hostrcb) + struct ipr_hostrcb *hostrcb) { struct ipr_resource_entry *res = NULL; - struct ipr_config_table_entry *cfgte; + struct ipr_config_table_entry_wrapper cfgtew; + __be32 cc_res_handle; + u32 is_ndn = 1; - cfgte = &hostrcb->hcam.u.ccn.cfgte; + if (ioa_cfg->sis64) { + cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64; + cc_res_handle = cfgtew.u.cfgte64->res_handle; + } else { + cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte; + cc_res_handle = cfgtew.u.cfgte->res_handle; + } list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { - if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr, - sizeof(cfgte->res_addr))) { + if (res->res_handle == cc_res_handle) { is_ndn = 0; break; } @@ -975,20 +1218,22 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, struct ipr_resource_entry, queue); list_del(&res->queue); - ipr_init_res_entry(res); + ipr_init_res_entry(res, &cfgtew); list_add_tail(&res->queue, &ioa_cfg->used_res_q); } - memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry)); + ipr_update_res_entry(res, &cfgtew); if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) { if (res->sdev) { res->del_from_ml = 1; - res->cfgte.res_handle = IPR_INVALID_RES_HANDLE; + res->res_handle = IPR_INVALID_RES_HANDLE; if (ioa_cfg->allow_ml_add_del) schedule_work(&ioa_cfg->work_q); - } else + } else { + ipr_clear_res_target(res); list_move_tail(&res->queue, &ioa_cfg->free_res_q); + } } else if (!res->sdev) { res->add_to_ml = 1; if (ioa_cfg->allow_ml_add_del) @@ -1941,12 +2186,14 @@ static const struct ipr_ses_table_entry * ipr_find_ses_entry(struct ipr_resource_entry *res) { int i, j, matches; + struct ipr_std_inq_vpids *vpids; const struct ipr_ses_table_entry *ste = ipr_ses_table; for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) { for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) { if (ste->compare_product_id_byte[j] == 'X') { - if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j]) + vpids = &res->std_inq_data.vpids; + if (vpids->product_id[j] == ste->product_id[j]) matches++; else break; @@ -1981,10 +2228,10 @@ static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_wi /* Loop through each config table entry in the config table buffer */ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { - if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data))) + if (!(IPR_IS_SES_DEVICE(res->std_inq_data))) continue; - if (bus != res->cfgte.res_addr.bus) + if (bus != res->bus) continue; if (!(ste = ipr_find_ses_entry(res))) @@ -2518,9 +2765,9 @@ 
restart: list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { if (res->add_to_ml) { - bus = res->cfgte.res_addr.bus; - target = res->cfgte.res_addr.target; - lun = res->cfgte.res_addr.lun; + bus = res->bus; + target = res->target; + lun = res->lun; res->add_to_ml = 0; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); scsi_add_device(ioa_cfg->host, bus, target, lun); @@ -3578,7 +3825,7 @@ static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribu spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = (struct ipr_resource_entry *)sdev->hostdata; if (res) - len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle); + len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return len; } @@ -3591,8 +3838,43 @@ static struct device_attribute ipr_adapter_handle_attr = { .show = ipr_show_adapter_handle }; +/** + * ipr_show_resource_path - Show the resource path for this device. + * @dev: device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + ssize_t len = -ENXIO; + char buffer[IPR_MAX_RES_PATH_LENGTH]; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res) + len = snprintf(buf, PAGE_SIZE, "%s\n", + ipr_format_resource_path(&res->res_path[0], &buffer[0])); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +static struct device_attribute ipr_resource_path_attr = { + .attr = { + .name = "resource_path", + .mode = S_IRUSR, + }, + .show = ipr_show_resource_path +}; + static struct device_attribute *ipr_dev_attrs[] = { &ipr_adapter_handle_attr, + &ipr_resource_path_attr, NULL, }; @@ -3645,9 +3927,9 @@ static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget) struct ipr_resource_entry *res; list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { - if ((res->cfgte.res_addr.bus == starget->channel) && - (res->cfgte.res_addr.target == starget->id) && - (res->cfgte.res_addr.lun == 0)) { + if ((res->bus == starget->channel) && + (res->target == starget->id) && + (res->lun == 0)) { return res; } } @@ -3717,6 +3999,17 @@ static int ipr_target_alloc(struct scsi_target *starget) static void ipr_target_destroy(struct scsi_target *starget) { struct ipr_sata_port *sata_port = starget->hostdata; + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata; + + if (ioa_cfg->sis64) { + if (starget->channel == IPR_ARRAY_VIRTUAL_BUS) + clear_bit(starget->id, ioa_cfg->array_ids); + else if (starget->channel == IPR_VSET_VIRTUAL_BUS) + clear_bit(starget->id, ioa_cfg->vset_ids); + else if (starget->channel == 0) + clear_bit(starget->id, ioa_cfg->target_ids); + } if (sata_port) { starget->hostdata = NULL; @@ -3738,9 +4031,9 @@ static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev) struct ipr_resource_entry *res; list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { - if ((res->cfgte.res_addr.bus == sdev->channel) && - (res->cfgte.res_addr.target == sdev->id) && - (res->cfgte.res_addr.lun == sdev->lun)) + if ((res->bus == sdev->channel) && + (res->target == sdev->id) && 
+ (res->lun == sdev->lun)) return res; } @@ -3789,6 +4082,7 @@ static int ipr_slave_configure(struct scsi_device *sdev) struct ipr_resource_entry *res; struct ata_port *ap = NULL; unsigned long lock_flags = 0; + char buffer[IPR_MAX_RES_PATH_LENGTH]; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); res = sdev->hostdata; @@ -3815,6 +4109,9 @@ static int ipr_slave_configure(struct scsi_device *sdev) ata_sas_slave_configure(sdev, ap); } else scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun); + if (ioa_cfg->sis64) + sdev_printk(KERN_INFO, sdev, "Resource path: %s\n", + ipr_format_resource_path(&res->res_path[0], &buffer[0])); return 0; } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); @@ -3963,7 +4260,7 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, } else regs = &ioarcb->u.add_data.u.regs; - ioarcb->res_handle = res->cfgte.res_handle; + ioarcb->res_handle = res->res_handle; cmd_pkt->request_type = IPR_RQTYPE_IOACMD; cmd_pkt->cdb[0] = IPR_RESET_DEVICE; if (ipr_is_gata(res)) { @@ -4013,19 +4310,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes, res = sata_port->res; if (res) { rc = ipr_device_reset(ioa_cfg, res); - switch(res->cfgte.proto) { - case IPR_PROTO_SATA: - case IPR_PROTO_SAS_STP: - *classes = ATA_DEV_ATA; - break; - case IPR_PROTO_SATA_ATAPI: - case IPR_PROTO_SAS_STP_ATAPI: - *classes = ATA_DEV_ATAPI; - break; - default: - *classes = ATA_DEV_UNKNOWN; - break; - }; + *classes = res->ata_class; } spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); @@ -4070,7 +4355,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd) return FAILED; list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { - if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { + if (ipr_cmd->ioarcb.res_handle == res->res_handle) { if (ipr_cmd->scsi_cmd) ipr_cmd->done = ipr_scsi_eh_done; if (ipr_cmd->qc) @@ -4092,7 +4377,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd) spin_lock_irq(scsi_cmd->device->host->host_lock); list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { - if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { + if (ipr_cmd->ioarcb.res_handle == res->res_handle) { rc = -EIO; break; } @@ -4131,13 +4416,13 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd) struct ipr_resource_entry *res; ENTER; - list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { - if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle, - sizeof(res->cfgte.res_handle))) { - scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus); - break; + if (!ioa_cfg->sis64) + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (res->res_handle == ipr_cmd->ioarcb.res_handle) { + scsi_report_bus_reset(ioa_cfg->host, res->bus); + break; + } } - } /* * If abort has not completed, indicate the reset has, else call the @@ -4235,7 +4520,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd) return SUCCESS; ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); - ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; + ipr_cmd->ioarcb.res_handle = res->res_handle; cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; cmd_pkt->request_type = IPR_RQTYPE_IOACMD; cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; @@ -5071,9 +5356,9 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); ipr_cmd->scsi_cmd = scsi_cmd; - ioarcb->res_handle = res->cfgte.res_handle; + ioarcb->res_handle = res->res_handle; ipr_cmd->done = ipr_scsi_done; - ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 
IPR_GET_PHYS_LOC(res->cfgte.res_addr)); + ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res)); if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) { if (scsi_cmd->underflow == 0) @@ -5216,20 +5501,9 @@ static void ipr_ata_phy_reset(struct ata_port *ap) goto out_unlock; } - switch(res->cfgte.proto) { - case IPR_PROTO_SATA: - case IPR_PROTO_SAS_STP: - ap->link.device[0].class = ATA_DEV_ATA; - break; - case IPR_PROTO_SATA_ATAPI: - case IPR_PROTO_SAS_STP_ATAPI: - ap->link.device[0].class = ATA_DEV_ATAPI; - break; - default: - ap->link.device[0].class = ATA_DEV_UNKNOWN; + ap->link.device[0].class = res->ata_class; + if (ap->link.device[0].class == ATA_DEV_UNKNOWN) ata_port_disable(ap); - break; - }; out_unlock: spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); @@ -5315,8 +5589,7 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd) ipr_dump_ioasa(ioa_cfg, ipr_cmd, res); if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET) - scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus, - res->cfgte.res_addr.target); + scsi_report_device_reset(ioa_cfg->host, res->bus, res->target); if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR) qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status); @@ -5452,7 +5725,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); ipr_cmd->qc = qc; ipr_cmd->done = ipr_sata_done; - ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; + ipr_cmd->ioarcb.res_handle = res->res_handle; ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; @@ -5466,7 +5739,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc) regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; ipr_copy_sata_tf(regs, &qc->tf); memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN); - ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr)); + ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res)); switch (qc->tf.protocol) { case ATA_PROT_NODATA: @@ -5715,13 +5988,14 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) continue; ipr_cmd->u.res = res; - ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids); + ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids); ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; + ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES; ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; @@ -5734,7 +6008,8 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_SET_SUP_DEVICE_TIMEOUT); - ipr_cmd->job_step = ipr_set_supported_devs; + if (!ioa_cfg->sis64) + ipr_cmd->job_step = ipr_set_supported_devs; return IPR_RC_JOB_RETURN; } @@ -6182,24 +6457,36 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_resource_entry *res, *temp; - struct ipr_config_table_entry *cfgte; - int found, i; + struct ipr_config_table_entry_wrapper cfgtew; + int entries, found, flag, i; LIST_HEAD(old_res); ENTER; - if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ) + if (ioa_cfg->sis64) + flag = ioa_cfg->u.cfg_table64->hdr64.flags; + else 
+ flag = ioa_cfg->u.cfg_table->hdr.flags; + + if (flag & IPR_UCODE_DOWNLOAD_REQ) dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) list_move_tail(&res->queue, &old_res); - for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) { - cfgte = &ioa_cfg->cfg_table->dev[i]; + if (ioa_cfg->sis64) + entries = ioa_cfg->u.cfg_table64->hdr64.num_entries; + else + entries = ioa_cfg->u.cfg_table->hdr.num_entries; + + for (i = 0; i < entries; i++) { + if (ioa_cfg->sis64) + cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i]; + else + cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i]; found = 0; list_for_each_entry_safe(res, temp, &old_res, queue) { - if (!memcmp(&res->cfgte.res_addr, - &cfgte->res_addr, sizeof(cfgte->res_addr))) { + if (ipr_is_same_device(res, &cfgtew)) { list_move_tail(&res->queue, &ioa_cfg->used_res_q); found = 1; break; @@ -6216,24 +6503,27 @@ static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) res = list_entry(ioa_cfg->free_res_q.next, struct ipr_resource_entry, queue); list_move_tail(&res->queue, &ioa_cfg->used_res_q); - ipr_init_res_entry(res); + ipr_init_res_entry(res, &cfgtew); res->add_to_ml = 1; } if (found) - memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry)); + ipr_update_res_entry(res, &cfgtew); } list_for_each_entry_safe(res, temp, &old_res, queue) { if (res->sdev) { res->del_from_ml = 1; - res->cfgte.res_handle = IPR_INVALID_RES_HANDLE; + res->res_handle = IPR_INVALID_RES_HANDLE; list_move_tail(&res->queue, &ioa_cfg->used_res_q); - } else { - list_move_tail(&res->queue, &ioa_cfg->free_res_q); } } + list_for_each_entry_safe(res, temp, &old_res, queue) { + ipr_clear_res_target(res); + list_move_tail(&res->queue, &ioa_cfg->free_res_q); + } + if (ioa_cfg->dual_raid && ipr_dual_ioa_raid) ipr_cmd->job_step = ipr_ioafp_mode_sense_page24; else @@ -6270,11 +6560,10 @@ static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; - ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff; - ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff; + ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff; - ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, - sizeof(struct ipr_config_table), + ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size, IPR_IOADL_FLAGS_READ_LAST); ipr_cmd->job_step = ipr_init_res_table; @@ -6567,7 +6856,7 @@ static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) ioa_cfg->toggle_bit = 1; /* Zero out config table */ - memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table)); + memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); } /** @@ -7370,8 +7659,8 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) ipr_free_cmd_blks(ioa_cfg); pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma); - pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table), - ioa_cfg->cfg_table, + pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size, + ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); for (i = 0; i < IPR_NUM_HCAMS; i++) { @@ -7488,13 +7777,24 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) ENTER; ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) * - IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL); + ioa_cfg->max_devs_supported, GFP_KERNEL); if (!ioa_cfg->res_entries) goto 
out; - for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++) + if (ioa_cfg->sis64) { + ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) * + BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL); + ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) * + BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL); + ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) * + BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL); + } + + for (i = 0; i < ioa_cfg->max_devs_supported; i++) { list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); + ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg; + } ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs), @@ -7513,11 +7813,11 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) if (!ioa_cfg->host_rrq) goto out_ipr_free_cmd_blocks; - ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev, - sizeof(struct ipr_config_table), - &ioa_cfg->cfg_table_dma); + ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev, + ioa_cfg->cfg_table_size, + &ioa_cfg->cfg_table_dma); - if (!ioa_cfg->cfg_table) + if (!ioa_cfg->u.cfg_table) goto out_free_host_rrq; for (i = 0; i < IPR_NUM_HCAMS; i++) { @@ -7551,8 +7851,9 @@ out_free_hostrcb_dma: ioa_cfg->hostrcb[i], ioa_cfg->hostrcb_dma[i]); } - pci_free_consistent(pdev, sizeof(struct ipr_config_table), - ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma); + pci_free_consistent(pdev, ioa_cfg->cfg_table_size, + ioa_cfg->u.cfg_table, + ioa_cfg->cfg_table_dma); out_free_host_rrq: pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma); @@ -7633,9 +7934,19 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, ioa_cfg->cache_state = CACHE_DISABLED; ipr_initialize_bus_attr(ioa_cfg); + ioa_cfg->max_devs_supported = ipr_max_devs; - host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; - host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; + if (ioa_cfg->sis64) { + host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS; + host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET; + if (ipr_max_devs > IPR_MAX_SIS64_DEVS) + ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; + } else { + host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; + host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; + if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS) + ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; + } host->max_channel = IPR_MAX_BUS_TO_SCAN; host->unique_id = host->host_no; host->max_cmd_len = IPR_MAX_CDB_LEN; @@ -7896,6 +8207,15 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) goto cleanup_nomem; + if (ioa_cfg->sis64) + ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) + + ((sizeof(struct ipr_config_table_entry64) + * ioa_cfg->max_devs_supported))); + else + ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) + + ((sizeof(struct ipr_config_table_entry) + * ioa_cfg->max_devs_supported))); + rc = ipr_alloc_mem(ioa_cfg); if (rc < 0) { dev_err(&pdev->dev, diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 64e41df2a196..f10c57b3d215 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -118,6 +118,10 @@ #define IPR_NUM_LOG_HCAMS 2 #define IPR_NUM_CFG_CHG_HCAMS 2 #define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS) + +#define IPR_MAX_SIS64_TARGETS_PER_BUS 1024 +#define IPR_MAX_SIS64_LUNS_PER_TARGET 0xffffffff + #define IPR_MAX_NUM_TARGETS_PER_BUS 256 #define IPR_MAX_NUM_LUNS_PER_TARGET 256 #define IPR_MAX_NUM_VSET_LUNS_PER_TARGET 8 @@ -139,6 +143,8 @@ IPR_NUM_INTERNAL_CMD_BLKS) #define IPR_MAX_PHYSICAL_DEVS 
192 +#define IPR_DEFAULT_SIS64_DEVS 1024 +#define IPR_MAX_SIS64_DEVS 4096 #define IPR_MAX_SGLIST 64 #define IPR_IOA_MAX_SECTORS 32767 @@ -173,6 +179,7 @@ #define IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE 0x01 #define IPR_HCAM_CDB_OP_CODE_LOG_DATA 0x02 #define IPR_SET_SUPPORTED_DEVICES 0xFB +#define IPR_SET_ALL_SUPPORTED_DEVICES 0x80 #define IPR_IOA_SHUTDOWN 0xF7 #define IPR_WR_BUF_DOWNLOAD_AND_SAVE 0x05 @@ -318,27 +325,27 @@ struct ipr_std_inq_data { u8 serial_num[IPR_SERIAL_NUM_LEN]; }__attribute__ ((packed)); +#define IPR_RES_TYPE_AF_DASD 0x00 +#define IPR_RES_TYPE_GENERIC_SCSI 0x01 +#define IPR_RES_TYPE_VOLUME_SET 0x02 +#define IPR_RES_TYPE_REMOTE_AF_DASD 0x03 +#define IPR_RES_TYPE_GENERIC_ATA 0x04 +#define IPR_RES_TYPE_ARRAY 0x05 +#define IPR_RES_TYPE_IOAFP 0xff + struct ipr_config_table_entry { u8 proto; #define IPR_PROTO_SATA 0x02 #define IPR_PROTO_SATA_ATAPI 0x03 #define IPR_PROTO_SAS_STP 0x06 -#define IPR_PROTO_SAS_STP_ATAPI 0x07 +#define IPR_PROTO_SAS_STP_ATAPI 0x07 u8 array_id; u8 flags; -#define IPR_IS_IOA_RESOURCE 0x80 -#define IPR_IS_ARRAY_MEMBER 0x20 -#define IPR_IS_HOT_SPARE 0x10 - +#define IPR_IS_IOA_RESOURCE 0x80 u8 rsvd_subtype; -#define IPR_RES_SUBTYPE(res) (((res)->cfgte.rsvd_subtype) & 0x0f) -#define IPR_SUBTYPE_AF_DASD 0 -#define IPR_SUBTYPE_GENERIC_SCSI 1 -#define IPR_SUBTYPE_VOLUME_SET 2 -#define IPR_SUBTYPE_GENERIC_ATA 4 - -#define IPR_QUEUEING_MODEL(res) ((((res)->cfgte.flags) & 0x70) >> 4) -#define IPR_QUEUE_FROZEN_MODEL 0 + +#define IPR_QUEUEING_MODEL(res) ((((res)->flags) & 0x70) >> 4) +#define IPR_QUEUE_FROZEN_MODEL 0 #define IPR_QUEUE_NACA_MODEL 1 struct ipr_res_addr res_addr; @@ -347,6 +354,28 @@ struct ipr_config_table_entry { struct ipr_std_inq_data std_inq_data; }__attribute__ ((packed, aligned (4))); +struct ipr_config_table_entry64 { + u8 res_type; + u8 proto; + u8 vset_num; + u8 array_id; + __be16 flags; + __be16 res_flags; +#define IPR_QUEUEING_MODEL64(res) ((((res)->res_flags) & 0x7000) >> 12) + __be32 res_handle; + u8 dev_id_type; + u8 reserved[3]; + __be64 dev_id; + __be64 lun; + __be64 lun_wwn[2]; +#define IPR_MAX_RES_PATH_LENGTH 24 + __be64 res_path; + struct ipr_std_inq_data std_inq_data; + u8 reserved2[4]; + __be64 reserved3[2]; // description text + u8 reserved4[8]; +}__attribute__ ((packed, aligned (8))); + struct ipr_config_table_hdr { u8 num_entries; u8 flags; @@ -354,13 +383,35 @@ struct ipr_config_table_hdr { __be16 reserved; }__attribute__((packed, aligned (4))); +struct ipr_config_table_hdr64 { + __be16 num_entries; + __be16 reserved; + u8 flags; + u8 reserved2[11]; +}__attribute__((packed, aligned (4))); + struct ipr_config_table { struct ipr_config_table_hdr hdr; - struct ipr_config_table_entry dev[IPR_MAX_PHYSICAL_DEVS]; + struct ipr_config_table_entry dev[0]; }__attribute__((packed, aligned (4))); +struct ipr_config_table64 { + struct ipr_config_table_hdr64 hdr64; + struct ipr_config_table_entry64 dev[0]; +}__attribute__((packed, aligned (8))); + +struct ipr_config_table_entry_wrapper { + union { + struct ipr_config_table_entry *cfgte; + struct ipr_config_table_entry64 *cfgte64; + } u; +}; + struct ipr_hostrcb_cfg_ch_not { - struct ipr_config_table_entry cfgte; + union { + struct ipr_config_table_entry cfgte; + struct ipr_config_table_entry64 cfgte64; + } u; u8 reserved[936]; }__attribute__((packed, aligned (4))); @@ -987,28 +1038,48 @@ struct ipr_sata_port { }; struct ipr_resource_entry { - struct ipr_config_table_entry cfgte; u8 needs_sync_complete:1; u8 in_erp:1; u8 add_to_ml:1; u8 del_from_ml:1; u8 resetting_device:1; + u32 bus; /* AKA 
channel */ + u32 target; /* AKA id */ + u32 lun; +#define IPR_ARRAY_VIRTUAL_BUS 0x1 +#define IPR_VSET_VIRTUAL_BUS 0x2 +#define IPR_IOAFP_VIRTUAL_BUS 0x3 + +#define IPR_GET_RES_PHYS_LOC(res) \ + (((res)->bus << 24) | ((res)->target << 8) | (res)->lun) + + u8 ata_class; + + u8 flags; + __be16 res_flags; + + __be32 type; + + u8 qmodel; + struct ipr_std_inq_data std_inq_data; + + __be32 res_handle; + __be64 dev_id; + struct scsi_lun dev_lun; + u8 res_path[8]; + + struct ipr_ioa_cfg *ioa_cfg; struct scsi_device *sdev; struct ipr_sata_port *sata_port; struct list_head queue; -}; +}; /* struct ipr_resource_entry */ struct ipr_resource_hdr { u16 num_entries; u16 reserved; }; -struct ipr_resource_table { - struct ipr_resource_hdr hdr; - struct ipr_resource_entry dev[IPR_MAX_PHYSICAL_DEVS]; -}; - struct ipr_misc_cbs { struct ipr_ioa_vpd ioa_vpd; struct ipr_inquiry_page0 page0_data; @@ -1133,6 +1204,13 @@ struct ipr_ioa_cfg { u8 revid; + /* + * Bitmaps for SIS64 generated target values + */ + unsigned long *target_ids; + unsigned long *array_ids; + unsigned long *vset_ids; + enum ipr_cache_state cache_state; u16 type; /* CCIN of the card */ @@ -1164,8 +1242,13 @@ struct ipr_ioa_cfg { char cfg_table_start[8]; #define IPR_CFG_TBL_START "cfg" - struct ipr_config_table *cfg_table; + union { + struct ipr_config_table *cfg_table; + struct ipr_config_table64 *cfg_table64; + } u; dma_addr_t cfg_table_dma; + u32 cfg_table_size; + u32 max_devs_supported; char resource_table_label[8]; #define IPR_RES_TABLE_LABEL "res_tbl" @@ -1234,7 +1317,7 @@ struct ipr_ioa_cfg { #define IPR_CMD_LABEL "ipr_cmd" struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS]; dma_addr_t ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; -}; +}; /* struct ipr_ioa_cfg */ struct ipr_cmnd { struct ipr_ioarcb ioarcb; @@ -1412,6 +1495,13 @@ struct ipr_ucode_image_header { #define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__) #define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)) +#define ipr_res_printk(level, ioa_cfg, bus, target, lun, fmt, ...) \ + printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \ + bus, target, lun, ##__VA_ARGS__) + +#define ipr_res_err(ioa_cfg, res, fmt, ...) \ + ipr_res_printk(KERN_ERR, ioa_cfg, (res)->bus, (res)->target, (res)->lun, fmt, ##__VA_ARGS__) + #define ipr_ra_printk(level, ioa_cfg, ra, fmt, ...) \ printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \ (ra).bus, (ra).target, (ra).lun, ##__VA_ARGS__) @@ -1419,9 +1509,6 @@ struct ipr_ucode_image_header { #define ipr_ra_err(ioa_cfg, ra, fmt, ...) \ ipr_ra_printk(KERN_ERR, ioa_cfg, ra, fmt, ##__VA_ARGS__) -#define ipr_res_err(ioa_cfg, res, fmt, ...) \ - ipr_ra_err(ioa_cfg, (res)->cfgte.res_addr, fmt, ##__VA_ARGS__) - #define ipr_phys_res_err(ioa_cfg, res, fmt, ...) \ { \ if ((res).bus >= IPR_MAX_NUM_BUSES) { \ @@ -1467,7 +1554,7 @@ ipr_err("----------------------------------------------------------\n") **/ static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res) { - return (res->cfgte.flags & IPR_IS_IOA_RESOURCE) ? 
1 : 0; + return res->type == IPR_RES_TYPE_IOAFP; } /** @@ -1479,12 +1566,8 @@ static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res) **/ static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res) { - if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) && - !ipr_is_ioa_resource(res) && - IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_AF_DASD) - return 1; - else - return 0; + return res->type == IPR_RES_TYPE_AF_DASD || + res->type == IPR_RES_TYPE_REMOTE_AF_DASD; } /** @@ -1496,12 +1579,7 @@ static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res) **/ static inline int ipr_is_vset_device(struct ipr_resource_entry *res) { - if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) && - !ipr_is_ioa_resource(res) && - IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_VOLUME_SET) - return 1; - else - return 0; + return res->type == IPR_RES_TYPE_VOLUME_SET; } /** @@ -1513,11 +1591,7 @@ static inline int ipr_is_vset_device(struct ipr_resource_entry *res) **/ static inline int ipr_is_gscsi(struct ipr_resource_entry *res) { - if (!ipr_is_ioa_resource(res) && - IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_SCSI) - return 1; - else - return 0; + return res->type == IPR_RES_TYPE_GENERIC_SCSI; } /** @@ -1530,7 +1604,7 @@ static inline int ipr_is_gscsi(struct ipr_resource_entry *res) static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res) { if (ipr_is_af_dasd_device(res) || - (ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))) + (ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->std_inq_data))) return 1; else return 0; @@ -1545,11 +1619,7 @@ static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res) **/ static inline int ipr_is_gata(struct ipr_resource_entry *res) { - if (!ipr_is_ioa_resource(res) && - IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_ATA) - return 1; - else - return 0; + return res->type == IPR_RES_TYPE_GENERIC_ATA; } /** @@ -1561,7 +1631,7 @@ static inline int ipr_is_gata(struct ipr_resource_entry *res) **/ static inline int ipr_is_naca_model(struct ipr_resource_entry *res) { - if (ipr_is_gscsi(res) && IPR_QUEUEING_MODEL(res) == IPR_QUEUE_NACA_MODEL) + if (ipr_is_gscsi(res) && res->qmodel == IPR_QUEUE_NACA_MODEL) return 1; return 0; } -- cgit v1.2.3 From 4565e3706329f65b5e64328b5369c53b6ab2715c Mon Sep 17 00:00:00 2001 From: Wayne Boyer Date: Fri, 19 Feb 2010 13:24:07 -0800 Subject: [SCSI] ipr: add error handling updates for the next generation chip Add support for the new log data notification and overlay IDs. 
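Concretely, the error log handler keys off the overlay ID carried in the HCAM; the new IDs route to the SIS64-aware loggers introduced here (schematic excerpt, condensed from the change below):

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}

The SIS64 handlers read the 64 bit hostrcb error layout (ipr_hostrcb64_error) and report device locations as formatted resource paths rather than bus/target/lun addresses.
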
Signed-off-by: Wayne Boyer Signed-off-by: James Bottomley --- drivers/scsi/ipr.c | 257 ++++++++++++++++++++++++++++++++++++++++++++++++++--- drivers/scsi/ipr.h | 160 +++++++++++++++++++++++++++++---- 2 files changed, 388 insertions(+), 29 deletions(-) diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 91e330a12721..b2e60bd4a0c6 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -1079,7 +1079,7 @@ static char *ipr_format_resource_path(u8 *res_path, char *buffer) sprintf(buffer, "%02X", res_path[0]); for (i=1; res_path[i] != 0xff; i++) - sprintf(buffer, "%s:%02X", buffer, res_path[i]); + sprintf(buffer, "%s-%02X", buffer, res_path[i]); return buffer; } @@ -1385,8 +1385,12 @@ static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd) static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg, struct ipr_hostrcb *hostrcb) { - struct ipr_hostrcb_type_12_error *error = - &hostrcb->hcam.u.error.u.type_12_error; + struct ipr_hostrcb_type_12_error *error; + + if (ioa_cfg->sis64) + error = &hostrcb->hcam.u.error64.u.type_12_error; + else + error = &hostrcb->hcam.u.error.u.type_12_error; ipr_err("-----Current Configuration-----\n"); ipr_err("Cache Directory Card Information:\n"); @@ -1478,6 +1482,48 @@ static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg, } } +/** + * ipr_log_sis64_config_error - Log a device error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int errors_logged, i; + struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry; + struct ipr_hostrcb_type_23_error *error; + char buffer[IPR_MAX_RES_PATH_LENGTH]; + + error = &hostrcb->hcam.u.error64.u.type_23_error; + errors_logged = be32_to_cpu(error->errors_logged); + + ipr_err("Device Errors Detected/Logged: %d/%d\n", + be32_to_cpu(error->errors_detected), errors_logged); + + dev_entry = error->dev; + + for (i = 0; i < errors_logged; i++, dev_entry++) { + ipr_err_separator; + + ipr_err("Device %d : %s", i + 1, + ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0])); + ipr_log_ext_vpd(&dev_entry->vpd); + + ipr_err("-----New Device Information-----\n"); + ipr_log_ext_vpd(&dev_entry->new_vpd); + + ipr_err("Cache Directory Card Information:\n"); + ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd); + + ipr_err("Adapter Card Information:\n"); + ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd); + } +} + /** * ipr_log_config_error - Log a configuration error. 
* @ioa_cfg: ioa config struct @@ -1672,7 +1718,11 @@ static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg, { struct ipr_hostrcb_type_17_error *error; - error = &hostrcb->hcam.u.error.u.type_17_error; + if (ioa_cfg->sis64) + error = &hostrcb->hcam.u.error64.u.type_17_error; + else + error = &hostrcb->hcam.u.error.u.type_17_error; + error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; strim(error->failure_reason); @@ -1779,6 +1829,42 @@ static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb, fabric->ioa_port, fabric->cascaded_expander, fabric->phy); } +/** + * ipr_log64_fabric_path - Log a fabric path error + * @hostrcb: hostrcb struct + * @fabric: fabric descriptor + * + * Return value: + * none + **/ +static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb, + struct ipr_hostrcb64_fabric_desc *fabric) +{ + int i, j; + u8 path_state = fabric->path_state; + u8 active = path_state & IPR_PATH_ACTIVE_MASK; + u8 state = path_state & IPR_PATH_STATE_MASK; + char buffer[IPR_MAX_RES_PATH_LENGTH]; + + for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) { + if (path_active_desc[i].active != active) + continue; + + for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) { + if (path_state_desc[j].state != state) + continue; + + ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n", + path_active_desc[i].desc, path_state_desc[j].desc, + ipr_format_resource_path(&fabric->res_path[0], &buffer[0])); + return; + } + } + + ipr_err("Path state=%02X Resource Path=%s\n", path_state, + ipr_format_resource_path(&fabric->res_path[0], &buffer[0])); +} + static const struct { u8 type; char *desc; @@ -1887,6 +1973,49 @@ static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb, be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); } +/** + * ipr_log64_path_elem - Log a fabric path element. + * @hostrcb: hostrcb struct + * @cfg: fabric path element struct + * + * Return value: + * none + **/ +static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb, + struct ipr_hostrcb64_config_element *cfg) +{ + int i, j; + u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK; + u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK; + u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK; + char buffer[IPR_MAX_RES_PATH_LENGTH]; + + if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64) + return; + + for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) { + if (path_type_desc[i].type != type) + continue; + + for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) { + if (path_status_desc[j].status != status) + continue; + + ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n", + path_status_desc[j].desc, path_type_desc[i].desc, + ipr_format_resource_path(&cfg->res_path[0], &buffer[0]), + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); + return; + } + } + ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s " + "WWN=%08X%08X\n", cfg->type_status, + ipr_format_resource_path(&cfg->res_path[0], &buffer[0]), + link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK], + be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1])); +} + /** * ipr_log_fabric_error - Log a fabric error. * @ioa_cfg: ioa config struct @@ -1924,6 +2053,96 @@ static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg, ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len); } +/** + * ipr_log_sis64_array_error - Log a sis64 array error. 
+ * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int i, num_entries; + struct ipr_hostrcb_type_24_error *error; + struct ipr_hostrcb64_array_data_entry *array_entry; + char buffer[IPR_MAX_RES_PATH_LENGTH]; + const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' }; + + error = &hostrcb->hcam.u.error64.u.type_24_error; + + ipr_err_separator; + + ipr_err("RAID %s Array Configuration: %s\n", + error->protection_level, + ipr_format_resource_path(&error->last_res_path[0], &buffer[0])); + + ipr_err_separator; + + array_entry = error->array_member; + num_entries = min_t(u32, be32_to_cpu(error->num_entries), + sizeof(error->array_member)); + + for (i = 0; i < num_entries; i++, array_entry++) { + + if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN)) + continue; + + if (error->exposed_mode_adn == i) + ipr_err("Exposed Array Member %d:\n", i); + else + ipr_err("Array Member %d:\n", i); + + ipr_err("Array Member %d:\n", i); + ipr_log_ext_vpd(&array_entry->vpd); + ipr_err("Current Location: %s", + ipr_format_resource_path(&array_entry->res_path[0], &buffer[0])); + ipr_err("Expected Location: %s", + ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0])); + + ipr_err_separator; + } +} + +/** + * ipr_log_sis64_fabric_error - Log a sis64 fabric error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_30_error *error; + struct ipr_hostrcb64_fabric_desc *fabric; + struct ipr_hostrcb64_config_element *cfg; + int i, add_len; + + error = &hostrcb->hcam.u.error64.u.type_30_error; + + error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; + ipr_hcam_err(hostrcb, "%s\n", error->failure_reason); + + add_len = be32_to_cpu(hostrcb->hcam.length) - + (offsetof(struct ipr_hostrcb64_error, u) + + offsetof(struct ipr_hostrcb_type_30_error, desc)); + + for (i = 0, fabric = error->desc; i < error->num_entries; i++) { + ipr_log64_fabric_path(hostrcb, fabric); + for_each_fabric_cfg(fabric, cfg) + ipr_log64_path_elem(hostrcb, cfg); + + add_len -= be16_to_cpu(fabric->length); + fabric = (struct ipr_hostrcb64_fabric_desc *) + ((unsigned long)fabric + be16_to_cpu(fabric->length)); + } + + ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len); +} + /** * ipr_log_generic_error - Log an adapter error. 
* @ioa_cfg: ioa config struct @@ -1983,13 +2202,16 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); - ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); + if (ioa_cfg->sis64) + ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); + else + ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); - if (ioasc == IPR_IOASC_BUS_WAS_RESET || - ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) { + if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET || + ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) { /* Tell the midlayer we had a bus reset so it will handle the UA properly */ scsi_report_bus_reset(ioa_cfg->host, - hostrcb->hcam.u.error.failing_dev_res_addr.bus); + hostrcb->hcam.u.error.fd_res_addr.bus); } error_index = ipr_get_error(ioasc); @@ -2037,6 +2259,16 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, case IPR_HOST_RCB_OVERLAY_ID_20: ipr_log_fabric_error(ioa_cfg, hostrcb); break; + case IPR_HOST_RCB_OVERLAY_ID_23: + ipr_log_sis64_config_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_24: + case IPR_HOST_RCB_OVERLAY_ID_26: + ipr_log_sis64_array_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_30: + ipr_log_sis64_fabric_error(ioa_cfg, hostrcb); + break; case IPR_HOST_RCB_OVERLAY_ID_1: case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: default: @@ -2061,7 +2293,12 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd) struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); - u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); + u32 fd_ioasc; + + if (ioa_cfg->sis64) + fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); + else + fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); list_del(&hostrcb->queue); list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); @@ -6996,7 +7233,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) if (!rc) { ipr_handle_log_data(ioa_cfg, hostrcb); - ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); + ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED && ioa_cfg->sdt_state == GET_DUMP) ioa_cfg->sdt_state = WAIT_FOR_DUMP; diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index f10c57b3d215..e6e90179e45e 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -754,12 +754,29 @@ struct ipr_hostrcb_device_data_entry_enhanced { struct ipr_ext_vpd cfc_last_with_dev_vpd; }__attribute__((packed, aligned (4))); +struct ipr_hostrcb64_device_data_entry_enhanced { + struct ipr_ext_vpd vpd; + u8 ccin[4]; + u8 res_path[8]; + struct ipr_ext_vpd new_vpd; + u8 new_ccin[4]; + struct ipr_ext_vpd ioa_last_with_dev_vpd; + struct ipr_ext_vpd cfc_last_with_dev_vpd; +}__attribute__((packed, aligned (4))); + struct ipr_hostrcb_array_data_entry { struct ipr_vpd vpd; struct ipr_res_addr expected_dev_res_addr; struct ipr_res_addr dev_res_addr; }__attribute__((packed, aligned (4))); +struct ipr_hostrcb64_array_data_entry { + struct ipr_ext_vpd vpd; + u8 ccin[4]; + u8 expected_res_path[8]; + u8 res_path[8]; +}__attribute__((packed, aligned (4))); + struct ipr_hostrcb_array_data_entry_enhanced { struct ipr_ext_vpd vpd; u8 ccin[4]; @@ -811,6 +828,14 @@ struct ipr_hostrcb_type_13_error { struct ipr_hostrcb_device_data_entry_enhanced dev[3]; }__attribute__((packed, aligned (4))); +struct ipr_hostrcb_type_23_error { + 
struct ipr_ext_vpd ioa_vpd; + struct ipr_ext_vpd cfc_vpd; + __be32 errors_detected; + __be32 errors_logged; + struct ipr_hostrcb64_device_data_entry_enhanced dev[3]; +}__attribute__((packed, aligned (4))); + struct ipr_hostrcb_type_04_error { struct ipr_vpd ioa_vpd; struct ipr_vpd cfc_vpd; @@ -838,6 +863,22 @@ struct ipr_hostrcb_type_14_error { struct ipr_hostrcb_array_data_entry_enhanced array_member[18]; }__attribute__((packed, aligned (4))); +struct ipr_hostrcb_type_24_error { + struct ipr_ext_vpd ioa_vpd; + struct ipr_ext_vpd cfc_vpd; + u8 reserved[2]; + u8 exposed_mode_adn; +#define IPR_INVALID_ARRAY_DEV_NUM 0xff + u8 array_id; + u8 last_res_path[8]; + u8 protection_level[8]; + struct ipr_ext_vpd array_vpd; + u8 description[16]; + u8 reserved2[3]; + u8 num_entries; + struct ipr_hostrcb64_array_data_entry array_member[32]; +}__attribute__((packed, aligned (4))); + struct ipr_hostrcb_type_07_error { u8 failure_reason[64]; struct ipr_vpd vpd; @@ -875,6 +916,22 @@ struct ipr_hostrcb_config_element { __be32 wwid[2]; }__attribute__((packed, aligned (4))); +struct ipr_hostrcb64_config_element { + __be16 length; + u8 descriptor_id; +#define IPR_DESCRIPTOR_MASK 0xC0 +#define IPR_DESCRIPTOR_SIS64 0x00 + + u8 reserved; + u8 type_status; + + u8 reserved2[2]; + u8 link_rate; + + u8 res_path[8]; + __be32 wwid[2]; +}__attribute__((packed, aligned (8))); + struct ipr_hostrcb_fabric_desc { __be16 length; u8 ioa_port; @@ -896,6 +953,20 @@ struct ipr_hostrcb_fabric_desc { struct ipr_hostrcb_config_element elem[1]; }__attribute__((packed, aligned (4))); +struct ipr_hostrcb64_fabric_desc { + __be16 length; + u8 descriptor_id; + + u8 reserved; + u8 path_state; + + u8 reserved2[2]; + u8 res_path[8]; + u8 reserved3[6]; + __be16 num_entries; + struct ipr_hostrcb64_config_element elem[1]; +}__attribute__((packed, aligned (8))); + #define for_each_fabric_cfg(fabric, cfg) \ for (cfg = (fabric)->elem; \ cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \ @@ -908,10 +979,17 @@ struct ipr_hostrcb_type_20_error { struct ipr_hostrcb_fabric_desc desc[1]; }__attribute__((packed, aligned (4))); +struct ipr_hostrcb_type_30_error { + u8 failure_reason[64]; + u8 reserved[3]; + u8 num_entries; + struct ipr_hostrcb64_fabric_desc desc[1]; +}__attribute__((packed, aligned (4))); + struct ipr_hostrcb_error { - __be32 failing_dev_ioasc; - struct ipr_res_addr failing_dev_res_addr; - __be32 failing_dev_res_handle; + __be32 fd_ioasc; + struct ipr_res_addr fd_res_addr; + __be32 fd_res_handle; __be32 prc; union { struct ipr_hostrcb_type_ff_error type_ff_error; @@ -928,6 +1006,26 @@ struct ipr_hostrcb_error { } u; }__attribute__((packed, aligned (4))); +struct ipr_hostrcb64_error { + __be32 fd_ioasc; + __be32 ioa_fw_level; + __be32 fd_res_handle; + __be32 prc; + __be64 fd_dev_id; + __be64 fd_lun; + u8 fd_res_path[8]; + __be64 time_stamp; + u8 reserved[2]; + union { + struct ipr_hostrcb_type_ff_error type_ff_error; + struct ipr_hostrcb_type_12_error type_12_error; + struct ipr_hostrcb_type_17_error type_17_error; + struct ipr_hostrcb_type_23_error type_23_error; + struct ipr_hostrcb_type_24_error type_24_error; + struct ipr_hostrcb_type_30_error type_30_error; + } u; +}__attribute__((packed, aligned (8))); + struct ipr_hostrcb_raw { __be32 data[sizeof(struct ipr_hostrcb_error)/sizeof(__be32)]; }__attribute__((packed, aligned (4))); @@ -965,7 +1063,11 @@ struct ipr_hcam { #define IPR_HOST_RCB_OVERLAY_ID_16 0x16 #define IPR_HOST_RCB_OVERLAY_ID_17 0x17 #define IPR_HOST_RCB_OVERLAY_ID_20 0x20 -#define 
IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF +#define IPR_HOST_RCB_OVERLAY_ID_23 0x23 +#define IPR_HOST_RCB_OVERLAY_ID_24 0x24 +#define IPR_HOST_RCB_OVERLAY_ID_26 0x26 +#define IPR_HOST_RCB_OVERLAY_ID_30 0x30 +#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF u8 reserved1[3]; __be32 ilid; @@ -975,6 +1077,7 @@ struct ipr_hcam { union { struct ipr_hostrcb_error error; + struct ipr_hostrcb64_error error64; struct ipr_hostrcb_cfg_ch_not ccn; struct ipr_hostrcb_raw raw; } u; @@ -985,6 +1088,7 @@ struct ipr_hostrcb { dma_addr_t hostrcb_dma; struct list_head queue; struct ipr_ioa_cfg *ioa_cfg; + char rp_buffer[IPR_MAX_RES_PATH_LENGTH]; }; /* IPR smart dump table structures */ @@ -1521,14 +1625,21 @@ struct ipr_ucode_image_header { } #define ipr_hcam_err(hostrcb, fmt, ...) \ -{ \ - if (ipr_is_device(&(hostrcb)->hcam.u.error.failing_dev_res_addr)) { \ - ipr_ra_err((hostrcb)->ioa_cfg, \ - (hostrcb)->hcam.u.error.failing_dev_res_addr, \ - fmt, ##__VA_ARGS__); \ - } else { \ - dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, ##__VA_ARGS__); \ - } \ +{ \ + if (ipr_is_device(hostrcb)) { \ + if ((hostrcb)->ioa_cfg->sis64) { \ + printk(KERN_ERR IPR_NAME ": %s: " fmt, \ + ipr_format_resource_path(&hostrcb->hcam.u.error64.fd_res_path[0], \ + &hostrcb->rp_buffer[0]), \ + __VA_ARGS__); \ + } else { \ + ipr_ra_err((hostrcb)->ioa_cfg, \ + (hostrcb)->hcam.u.error.fd_res_addr, \ + fmt, __VA_ARGS__); \ + } \ + } else { \ + dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, __VA_ARGS__); \ + } \ } #define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\ @@ -1637,18 +1748,29 @@ static inline int ipr_is_naca_model(struct ipr_resource_entry *res) } /** - * ipr_is_device - Determine if resource address is that of a device - * @res_addr: resource address struct + * ipr_is_device - Determine if the hostrcb structure is related to a device + * @hostrcb: host resource control blocks struct * * Return value: * 1 if AF / 0 if not AF **/ -static inline int ipr_is_device(struct ipr_res_addr *res_addr) +static inline int ipr_is_device(struct ipr_hostrcb *hostrcb) { - if ((res_addr->bus < IPR_MAX_NUM_BUSES) && - (res_addr->target < (IPR_MAX_NUM_TARGETS_PER_BUS - 1))) - return 1; - + struct ipr_res_addr *res_addr; + u8 *res_path; + + if (hostrcb->ioa_cfg->sis64) { + res_path = &hostrcb->hcam.u.error64.fd_res_path[0]; + if ((res_path[0] == 0x00 || res_path[0] == 0x80 || + res_path[0] == 0x81) && res_path[2] != 0xFF) + return 1; + } else { + res_addr = &hostrcb->hcam.u.error.fd_res_addr; + + if ((res_addr->bus < IPR_MAX_NUM_BUSES) && + (res_addr->target < (IPR_MAX_NUM_TARGETS_PER_BUS - 1))) + return 1; + } return 0; } -- cgit v1.2.3 From dcbad00e6b403089b1846e788bc1a0c67b2bfd2d Mon Sep 17 00:00:00 2001 From: Wayne Boyer Date: Fri, 19 Feb 2010 13:24:14 -0800 Subject: [SCSI] ipr: add hardware assisted smart dump functionality This patch adds the hardware assisted smart dump functionality for the next generation IOA PCI interface chip. 
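The mechanism is an indirect address/data register pair: the driver writes an
adapter address into the new dump_addr_reg and reads the word at that address
back from dump_data_reg. A condensed, illustrative sketch of the read loop this
patch adds (the full version is ipr_get_sis64_dump_data_section() below):

    /* Sketch: copy length_in_words 32-bit words of IOA memory,
     * starting at start_addr, through the indirect dump registers.
     */
    static int sis64_dump_section(struct ipr_ioa_cfg *ioa_cfg, u32 start_addr,
                                  __be32 *dest, u32 length_in_words)
    {
            u32 i;

            for (i = 0; i < length_in_words; i++) {
                    /* select the adapter address to read */
                    writel(start_addr + (i * 4), ioa_cfg->regs.dump_addr_reg);
                    /* fetch the word and store it big-endian for the dump */
                    *dest++ = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
            }

            return 0;
    }

Unlike the existing LDUMP path, no handshake through the IOA microprocessor
interrupt registers is needed, which is why ipr_get_ldump_data_section()
short-circuits to this routine on sis64 adapters.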
Signea-off-by: Wayne Boyer Signed-off-by: James Bottomley --- drivers/scsi/ipr.c | 81 +++++++++++++++++++++++++++++++++++++++++++----------- drivers/scsi/ipr.h | 16 +++++++---- 2 files changed, 75 insertions(+), 22 deletions(-) diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index b2e60bd4a0c6..dd12486ba520 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -142,7 +142,9 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { .ioarrin_reg = 0x00070, .sense_uproc_interrupt_reg = 0x00020, .set_uproc_interrupt_reg = 0x00020, - .clr_uproc_interrupt_reg = 0x00028 + .clr_uproc_interrupt_reg = 0x00028, + .dump_addr_reg = 0x00064, + .dump_data_reg = 0x00068 } }, }; @@ -2513,6 +2515,31 @@ static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay) return -EIO; } +/** + * ipr_get_sis64_dump_data_section - Dump IOA memory + * @ioa_cfg: ioa config struct + * @start_addr: adapter address to dump + * @dest: destination kernel buffer + * @length_in_words: length to dump in 4 byte words + * + * Return value: + * 0 on success + **/ +static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg, + u32 start_addr, + __be32 *dest, u32 length_in_words) +{ + int i; + + for (i = 0; i < length_in_words; i++) { + writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg); + *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg)); + dest++; + } + + return 0; +} + /** * ipr_get_ldump_data_section - Dump IOA memory * @ioa_cfg: ioa config struct @@ -2530,6 +2557,10 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, volatile u32 temp_pcii_reg; int i, delay = 0; + if (ioa_cfg->sis64) + return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr, + dest, length_in_words); + /* Write IOA interrupt reg starting LDUMP state */ writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT), ioa_cfg->regs.set_uproc_interrupt_reg); @@ -2787,6 +2818,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) u32 num_entries, start_off, end_off; u32 bytes_to_copy, bytes_copied, rc; struct ipr_sdt *sdt; + int valid = 1; int i; ENTER; @@ -2800,7 +2832,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) start_addr = readl(ioa_cfg->ioa_mailbox); - if (!ipr_sdt_is_fmt2(start_addr)) { + if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { dev_err(&ioa_cfg->pdev->dev, "Invalid dump table format: %lx\n", start_addr); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); @@ -2829,7 +2861,6 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) /* IOA Dump entry */ ipr_init_dump_entry_hdr(&ioa_dump->hdr); - ioa_dump->format = IPR_SDT_FMT2; ioa_dump->hdr.len = 0; ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; @@ -2844,7 +2875,8 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) sizeof(struct ipr_sdt) / sizeof(__be32)); /* Smart Dump table is ready to use and the first entry is valid */ - if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) { + if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && + (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { dev_err(&ioa_cfg->pdev->dev, "Dump of IOA failed. 
Dump table not valid: %d, %X.\n", rc, be32_to_cpu(sdt->hdr.state)); @@ -2868,12 +2900,19 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) } if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { - sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset); - start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK; - end_off = be32_to_cpu(sdt->entry[i].end_offset); - - if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) { - bytes_to_copy = end_off - start_off; + sdt_word = be32_to_cpu(sdt->entry[i].start_token); + if (ioa_cfg->sis64) + bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token); + else { + start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK; + end_off = be32_to_cpu(sdt->entry[i].end_token); + + if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) + bytes_to_copy = end_off - start_off; + else + valid = 0; + } + if (valid) { if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) { sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; continue; @@ -7202,7 +7241,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) mailbox = readl(ioa_cfg->ioa_mailbox); - if (!ipr_sdt_is_fmt2(mailbox)) { + if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) { ipr_unit_check_no_data(ioa_cfg); return; } @@ -7211,15 +7250,20 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt, (sizeof(struct ipr_uc_sdt)) / sizeof(__be32)); - if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) || - !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) { + if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) || + ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) && + (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) { ipr_unit_check_no_data(ioa_cfg); return; } /* Find length of the first sdt entry (UC buffer) */ - length = (be32_to_cpu(sdt.entry[0].end_offset) - - be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK; + if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE) + length = be32_to_cpu(sdt.entry[0].end_token); + else + length = (be32_to_cpu(sdt.entry[0].end_token) - + be32_to_cpu(sdt.entry[0].start_token)) & + IPR_FMT2_MBX_ADDR_MASK; hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, struct ipr_hostrcb, queue); @@ -7227,7 +7271,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); rc = ipr_get_ldump_data_section(ioa_cfg, - be32_to_cpu(sdt.entry[0].bar_str_offset), + be32_to_cpu(sdt.entry[0].start_token), (__be32 *)&hostrcb->hcam, min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32)); @@ -8202,6 +8246,11 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; + + if (ioa_cfg->sis64) { + t->dump_addr_reg = base + p->dump_addr_reg; + t->dump_data_reg = base + p->dump_data_reg; + } } /** diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index e6e90179e45e..4f2f1d2e9875 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -228,6 +228,7 @@ #define IPR_SDT_FMT2_BAR5_SEL 0x5 #define IPR_SDT_FMT2_EXP_ROM_SEL 0x8 #define IPR_FMT2_SDT_READY_TO_USE 0xC4D4E3F2 +#define IPR_FMT3_SDT_READY_TO_USE 0xC4D4E3F3 #define IPR_DOORBELL 0x82800000 #define IPR_RUNTIME_RESET 0x40000000 @@ -1093,10 +1094,9 @@ struct ipr_hostrcb { /* IPR smart dump table structures */ struct ipr_sdt_entry { - __be32 bar_str_offset; - __be32 end_offset; - u8 
entry_byte; - u8 reserved[3]; + __be32 start_token; + __be32 end_token; + u8 reserved[4]; u8 flags; #define IPR_SDT_ENDIAN 0x80 @@ -1204,6 +1204,9 @@ struct ipr_interrupt_offsets { unsigned long sense_uproc_interrupt_reg; unsigned long set_uproc_interrupt_reg; unsigned long clr_uproc_interrupt_reg; + + unsigned long dump_addr_reg; + unsigned long dump_data_reg; }; struct ipr_interrupts { @@ -1217,6 +1220,9 @@ struct ipr_interrupts { void __iomem *sense_uproc_interrupt_reg; void __iomem *set_uproc_interrupt_reg; void __iomem *clr_uproc_interrupt_reg; + + void __iomem *dump_addr_reg; + void __iomem *dump_data_reg; }; struct ipr_chip_cfg_t { @@ -1536,8 +1542,6 @@ struct ipr_ioa_dump { u32 next_page_index; u32 page_offset; u32 format; -#define IPR_SDT_FMT2 2 -#define IPR_SDT_UNKNOWN 3 }__attribute__((packed, aligned (4))); struct ipr_dump { -- cgit v1.2.3 From f72919ec2bbbe1c42cdda7857a96c0c40e1d78aa Mon Sep 17 00:00:00 2001 From: Wayne Boyer Date: Fri, 19 Feb 2010 13:24:21 -0800 Subject: [SCSI] ipr: implement shutdown changes and remove obsolete write cache parameter This patch adds a reboot notifier that will issue a shutdown prepare command to all adapters. This helps to prevent a problem where the primary adapter can get shut down before the secondary adapter and cause the secondary adapter to fail over and log and error. This patch also removes the "enable_cache" paramater as it is obsolete. Write cache for an adapter is now controlled from the iprconfig utility. Signed-off-by: Wayne Boyer Signed-off-by: James Bottomley --- drivers/scsi/ipr.c | 208 +++++++++++++++++------------------------------------ drivers/scsi/ipr.h | 10 +-- 2 files changed, 66 insertions(+), 152 deletions(-) diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index dd12486ba520..a64fb5085882 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -72,6 +72,7 @@ #include #include #include +#include #include #include #include @@ -92,7 +93,6 @@ static unsigned int ipr_max_speed = 1; static int ipr_testmode = 0; static unsigned int ipr_fastfail = 0; static unsigned int ipr_transop_timeout = 0; -static unsigned int ipr_enable_cache = 1; static unsigned int ipr_debug = 0; static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS; static unsigned int ipr_dual_ioa_raid = 1; @@ -175,8 +175,6 @@ module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries"); module_param_named(transop_timeout, ipr_transop_timeout, int, 0); MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)"); -module_param_named(enable_cache, ipr_enable_cache, int, 0); -MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)"); module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. 
(default: 0)"); module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0); @@ -3097,105 +3095,6 @@ static struct bin_attribute ipr_trace_attr = { }; #endif -static const struct { - enum ipr_cache_state state; - char *name; -} cache_state [] = { - { CACHE_NONE, "none" }, - { CACHE_DISABLED, "disabled" }, - { CACHE_ENABLED, "enabled" } -}; - -/** - * ipr_show_write_caching - Show the write caching attribute - * @dev: device struct - * @buf: buffer - * - * Return value: - * number of bytes printed to buffer - **/ -static ssize_t ipr_show_write_caching(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; - unsigned long lock_flags = 0; - int i, len = 0; - - spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - for (i = 0; i < ARRAY_SIZE(cache_state); i++) { - if (cache_state[i].state == ioa_cfg->cache_state) { - len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name); - break; - } - } - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - return len; -} - - -/** - * ipr_store_write_caching - Enable/disable adapter write cache - * @dev: device struct - * @buf: buffer - * @count: buffer size - * - * This function will enable/disable adapter write cache. - * - * Return value: - * count on success / other on failure - **/ -static ssize_t ipr_store_write_caching(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; - unsigned long lock_flags = 0; - enum ipr_cache_state new_state = CACHE_INVALID; - int i; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - if (ioa_cfg->cache_state == CACHE_NONE) - return -EINVAL; - - for (i = 0; i < ARRAY_SIZE(cache_state); i++) { - if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) { - new_state = cache_state[i].state; - break; - } - } - - if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED) - return -EINVAL; - - spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - if (ioa_cfg->cache_state == new_state) { - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - return count; - } - - ioa_cfg->cache_state = new_state; - dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n", - new_state == CACHE_ENABLED ? 
"Enabling" : "Disabling"); - if (!ioa_cfg->in_reset_reload) - ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); - - return count; -} - -static struct device_attribute ipr_ioa_cache_attr = { - .attr = { - .name = "write_cache", - .mode = S_IRUGO | S_IWUSR, - }, - .show = ipr_show_write_caching, - .store = ipr_store_write_caching -}; - /** * ipr_show_fw_version - Show the firmware version * @dev: class device struct @@ -3797,7 +3696,6 @@ static struct device_attribute *ipr_ioa_attrs[] = { &ipr_ioa_state_attr, &ipr_ioa_reset_attr, &ipr_update_fw_attr, - &ipr_ioa_cache_attr, NULL, }; @@ -6292,36 +6190,6 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) return IPR_RC_JOB_CONTINUE; } -/** - * ipr_setup_write_cache - Disable write cache if needed - * @ipr_cmd: ipr command struct - * - * This function sets up adapters write cache to desired setting - * - * Return value: - * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN - **/ -static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd) -{ - struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; - - ipr_cmd->job_step = ipr_set_supported_devs; - ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, - struct ipr_resource_entry, queue); - - if (ioa_cfg->cache_state != CACHE_DISABLED) - return IPR_RC_JOB_CONTINUE; - - ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); - ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; - ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; - ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL; - - ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); - - return IPR_RC_JOB_RETURN; -} - /** * ipr_get_mode_page - Locate specified mode page * @mode_pages: mode page buffer @@ -6522,7 +6390,9 @@ static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd) ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), length); - ipr_cmd->job_step = ipr_setup_write_cache; + ipr_cmd->job_step = ipr_set_supported_devs; + ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, + struct ipr_resource_entry, queue); ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); LEAVE; @@ -6590,10 +6460,13 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd) **/ static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd) { + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) { - ipr_cmd->job_step = ipr_setup_write_cache; + ipr_cmd->job_step = ipr_set_supported_devs; + ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, + struct ipr_resource_entry, queue); return IPR_RC_JOB_CONTINUE; } @@ -6944,13 +6817,9 @@ static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd) static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; - struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data; ENTER; - if (!ipr_inquiry_page_supported(page0, 1)) - ioa_cfg->cache_state = CACHE_NONE; - ipr_cmd->job_step = ipr_ioafp_cap_inquiry; ipr_ioafp_inquiry(ipr_cmd, 1, 3, @@ -8209,10 +8078,6 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, init_waitqueue_head(&ioa_cfg->reset_wait_q); init_waitqueue_head(&ioa_cfg->msi_wait_q); ioa_cfg->sdt_state = INACTIVE; - if (ipr_enable_cache) - ioa_cfg->cache_state = CACHE_ENABLED; - else - ioa_cfg->cache_state = CACHE_DISABLED; 
ipr_initialize_bus_attr(ioa_cfg); ioa_cfg->max_devs_supported = ipr_max_devs; @@ -8841,6 +8706,61 @@ static struct pci_driver ipr_driver = { .err_handler = &ipr_err_handler, }; +/** + * ipr_halt_done - Shutdown prepare completion + * + * Return value: + * none + **/ +static void ipr_halt_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); +} + +/** + * ipr_halt - Issue shutdown prepare to all adapters + * + * Return value: + * NOTIFY_OK on success / NOTIFY_DONE on failure + **/ +static int ipr_halt(struct notifier_block *nb, ulong event, void *buf) +{ + struct ipr_cmnd *ipr_cmd; + struct ipr_ioa_cfg *ioa_cfg; + unsigned long flags = 0; + + if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) + return NOTIFY_DONE; + + spin_lock(&ipr_driver_lock); + + list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) { + spin_lock_irqsave(ioa_cfg->host->host_lock, flags); + if (!ioa_cfg->allow_cmds) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); + continue; + } + + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; + ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL; + + ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); + } + spin_unlock(&ipr_driver_lock); + + return NOTIFY_OK; +} + +static struct notifier_block ipr_notifier = { + ipr_halt, NULL, 0 +}; + /** * ipr_init - Module entry point * @@ -8852,6 +8772,7 @@ static int __init ipr_init(void) ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", IPR_DRIVER_VERSION, IPR_DRIVER_DATE); + register_reboot_notifier(&ipr_notifier); return pci_register_driver(&ipr_driver); } @@ -8865,6 +8786,7 @@ static int __init ipr_init(void) **/ static void __exit ipr_exit(void) { + unregister_reboot_notifier(&ipr_notifier); pci_unregister_driver(&ipr_driver); } diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 4f2f1d2e9875..36736b512846 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -136,7 +136,7 @@ /* We need resources for HCAMS, IOA reset, IOA bringdown, and ERP */ #define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \ - ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 3) + ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4) #define IPR_MAX_COMMANDS IPR_NUM_BASE_CMD_BLKS #define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \ @@ -1284,13 +1284,6 @@ enum ipr_sdt_state { DUMP_OBTAINED }; -enum ipr_cache_state { - CACHE_NONE, - CACHE_DISABLED, - CACHE_ENABLED, - CACHE_INVALID -}; - /* Per-controller data */ struct ipr_ioa_cfg { char eye_catcher[8]; @@ -1321,7 +1314,6 @@ struct ipr_ioa_cfg { unsigned long *array_ids; unsigned long *vset_ids; - enum ipr_cache_state cache_state; u16 type; /* CCIN of the card */ u8 log_level; -- cgit v1.2.3 From 214777ba125e2902c9b84c764be38099c94d0bd2 Mon Sep 17 00:00:00 2001 From: Wayne Boyer Date: Fri, 19 Feb 2010 13:24:26 -0800 Subject: [SCSI] ipr: add support for multiple stages of initialization This patch adds support for using the new IOA initialization feedback register. It also enables 64 bit support in the ipr_ioafp_identify_hrrq and ipr_mask_and_clear_interrupts routines. 
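For reference, the feedback register packs the current IPL stage in its high
byte and the expected time for that stage (in seconds) in its low 16 bits. A
condensed, illustrative sketch of the decoding performed by the new
ipr_reset_next_stage() added below:

    /* Sketch: decode the SIS64 initialization feedback register. */
    u32 feedback = readl(ioa_cfg->regs.init_feedback_reg);
    unsigned long stage = feedback & IPR_IPL_INIT_STAGE_MASK;
    unsigned long stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;

    /* Clamp the stage time before using it to re-arm the reset job timer. */
    if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
            stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
    else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
            stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;

    if (stage == IPR_IPL_INIT_STAGE_TRANSOP)
            /* adapter is transitioning to operational; identify the HRRQ next */
            ipr_cmd->job_step = ipr_ioafp_identify_hrrq;

This keeps the reset state machine from depending on a single long transop
timeout: the timer is re-armed per stage using the time reported by the
adapter itself.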
Signed-off-by: Wayne Boyer Signed-off-by: James Bottomley --- drivers/scsi/ipr.c | 185 ++++++++++++++++++++++++++++++++++++++++++++--------- drivers/scsi/ipr.h | 25 ++++++++ 2 files changed, 180 insertions(+), 30 deletions(-) diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index a64fb5085882..10b162d607ac 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -106,13 +106,20 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { { .set_interrupt_mask_reg = 0x0022C, .clr_interrupt_mask_reg = 0x00230, + .clr_interrupt_mask_reg32 = 0x00230, .sense_interrupt_mask_reg = 0x0022C, + .sense_interrupt_mask_reg32 = 0x0022C, .clr_interrupt_reg = 0x00228, + .clr_interrupt_reg32 = 0x00228, .sense_interrupt_reg = 0x00224, + .sense_interrupt_reg32 = 0x00224, .ioarrin_reg = 0x00404, .sense_uproc_interrupt_reg = 0x00214, + .sense_uproc_interrupt_reg32 = 0x00214, .set_uproc_interrupt_reg = 0x00214, - .clr_uproc_interrupt_reg = 0x00218 + .set_uproc_interrupt_reg32 = 0x00214, + .clr_uproc_interrupt_reg = 0x00218, + .clr_uproc_interrupt_reg32 = 0x00218 } }, { /* Snipe and Scamp */ @@ -121,13 +128,20 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { { .set_interrupt_mask_reg = 0x00288, .clr_interrupt_mask_reg = 0x0028C, + .clr_interrupt_mask_reg32 = 0x0028C, .sense_interrupt_mask_reg = 0x00288, + .sense_interrupt_mask_reg32 = 0x00288, .clr_interrupt_reg = 0x00284, + .clr_interrupt_reg32 = 0x00284, .sense_interrupt_reg = 0x00280, + .sense_interrupt_reg32 = 0x00280, .ioarrin_reg = 0x00504, .sense_uproc_interrupt_reg = 0x00290, + .sense_uproc_interrupt_reg32 = 0x00290, .set_uproc_interrupt_reg = 0x00290, - .clr_uproc_interrupt_reg = 0x00294 + .set_uproc_interrupt_reg32 = 0x00290, + .clr_uproc_interrupt_reg = 0x00294, + .clr_uproc_interrupt_reg32 = 0x00294 } }, { /* CRoC */ @@ -136,13 +150,21 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { { .set_interrupt_mask_reg = 0x00010, .clr_interrupt_mask_reg = 0x00018, + .clr_interrupt_mask_reg32 = 0x0001C, .sense_interrupt_mask_reg = 0x00010, + .sense_interrupt_mask_reg32 = 0x00014, .clr_interrupt_reg = 0x00008, + .clr_interrupt_reg32 = 0x0000C, .sense_interrupt_reg = 0x00000, + .sense_interrupt_reg32 = 0x00004, .ioarrin_reg = 0x00070, .sense_uproc_interrupt_reg = 0x00020, + .sense_uproc_interrupt_reg32 = 0x00024, .set_uproc_interrupt_reg = 0x00020, + .set_uproc_interrupt_reg32 = 0x00024, .clr_uproc_interrupt_reg = 0x00028, + .clr_uproc_interrupt_reg32 = 0x0002C, + .init_feedback_reg = 0x0005C, .dump_addr_reg = 0x00064, .dump_data_reg = 0x00068 } @@ -592,10 +614,15 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg, ioa_cfg->allow_interrupts = 0; /* Set interrupt mask to stop all new interrupts */ - writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); + if (ioa_cfg->sis64) + writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg); + else + writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); /* Clear any pending interrupts */ - writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg); + if (ioa_cfg->sis64) + writel(~0, ioa_cfg->regs.clr_interrupt_reg); + writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32); int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); } @@ -2561,7 +2588,7 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, /* Write IOA interrupt reg starting LDUMP state */ writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT), - ioa_cfg->regs.set_uproc_interrupt_reg); + ioa_cfg->regs.set_uproc_interrupt_reg32); /* Wait for IO debug acknowledge */ if (ipr_wait_iodbg_ack(ioa_cfg, @@ -2580,7 +2607,7 @@ static int 
ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, /* Signal address valid - clear IOA Reset alert */ writel(IPR_UPROCI_RESET_ALERT, - ioa_cfg->regs.clr_uproc_interrupt_reg); + ioa_cfg->regs.clr_uproc_interrupt_reg32); for (i = 0; i < length_in_words; i++) { /* Wait for IO debug acknowledge */ @@ -2605,10 +2632,10 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, /* Signal end of block transfer. Set reset alert then clear IO debug ack */ writel(IPR_UPROCI_RESET_ALERT, - ioa_cfg->regs.set_uproc_interrupt_reg); + ioa_cfg->regs.set_uproc_interrupt_reg32); writel(IPR_UPROCI_IO_DEBUG_ALERT, - ioa_cfg->regs.clr_uproc_interrupt_reg); + ioa_cfg->regs.clr_uproc_interrupt_reg32); /* Signal dump data received - Clear IO debug Ack */ writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, @@ -2617,7 +2644,7 @@ static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) { temp_pcii_reg = - readl(ioa_cfg->regs.sense_uproc_interrupt_reg); + readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT)) return 0; @@ -4831,11 +4858,29 @@ static irqreturn_t ipr_isr(int irq, void *devp) return IRQ_NONE; } - int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; - /* If an interrupt on the adapter did not occur, ignore it */ + /* If an interrupt on the adapter did not occur, ignore it. + * Or in the case of SIS 64, check for a stage change interrupt. + */ if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) { + if (ioa_cfg->sis64) { + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) { + + /* clear stage change */ + writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + list_del(&ioa_cfg->reset_cmd->queue); + del_timer(&ioa_cfg->reset_cmd->timer); + ipr_reset_ioa_job(ioa_cfg->reset_cmd); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return IRQ_HANDLED; + } + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return IRQ_NONE; } @@ -4878,8 +4923,8 @@ static irqreturn_t ipr_isr(int irq, void *devp) if (ipr_cmd != NULL) { /* Clear the PCI interrupt */ do { - writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg); - int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg; } while (int_reg & IPR_PCII_HRRQ_UPDATED && num_hrrq++ < IPR_MAX_HRRQ_RETRIES); @@ -6887,7 +6932,7 @@ static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd) } /** - * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ. + * ipr_ioafp_identify_hrrq - Send Identify Host RRQ. 
* @ipr_cmd: ipr command struct * * This function send an Identify Host Request Response Queue @@ -6896,7 +6941,7 @@ static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd) * Return value: * IPR_RC_JOB_RETURN **/ -static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd) +static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; @@ -6908,19 +6953,32 @@ static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd) ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + if (ioa_cfg->sis64) + ioarcb->cmd_pkt.cdb[1] = 0x1; ioarcb->cmd_pkt.cdb[2] = - ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff; + ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff; ioarcb->cmd_pkt.cdb[3] = - ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff; + ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff; ioarcb->cmd_pkt.cdb[4] = - ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff; + ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff; ioarcb->cmd_pkt.cdb[5] = - ((u32) ioa_cfg->host_rrq_dma) & 0xff; + ((u64) ioa_cfg->host_rrq_dma) & 0xff; ioarcb->cmd_pkt.cdb[7] = ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff; ioarcb->cmd_pkt.cdb[8] = (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff; + if (ioa_cfg->sis64) { + ioarcb->cmd_pkt.cdb[10] = + ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff; + ioarcb->cmd_pkt.cdb[11] = + ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff; + ioarcb->cmd_pkt.cdb[12] = + ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff; + ioarcb->cmd_pkt.cdb[13] = + ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff; + } + ipr_cmd->job_step = ipr_ioafp_std_inquiry; ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); @@ -7004,6 +7062,57 @@ static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size); } +/** + * ipr_reset_next_stage - Process IPL stage change based on feedback register. 
+ * @ipr_cmd: ipr command struct + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd) +{ + unsigned long stage, stage_time; + u32 feedback; + volatile u32 int_reg; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + u64 maskval = 0; + + feedback = readl(ioa_cfg->regs.init_feedback_reg); + stage = feedback & IPR_IPL_INIT_STAGE_MASK; + stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK; + + ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time); + + /* sanity check the stage_time value */ + if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME) + stage_time = IPR_IPL_INIT_MIN_STAGE_TIME; + else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT) + stage_time = IPR_LONG_OPERATIONAL_TIMEOUT; + + if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) { + writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + stage_time = ioa_cfg->transop_timeout; + ipr_cmd->job_step = ipr_ioafp_identify_hrrq; + } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) { + ipr_cmd->job_step = ipr_ioafp_identify_hrrq; + maskval = IPR_PCII_IPL_STAGE_CHANGE; + maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER; + writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + return IPR_RC_JOB_CONTINUE; + } + + ipr_cmd->timer.data = (unsigned long) ipr_cmd; + ipr_cmd->timer.expires = jiffies + stage_time * HZ; + ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; + ipr_cmd->done = ipr_reset_ioa_job; + add_timer(&ipr_cmd->timer); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); + + return IPR_RC_JOB_RETURN; +} + /** * ipr_reset_enable_ioa - Enable the IOA following a reset. 
* @ipr_cmd: ipr command struct @@ -7020,7 +7129,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) volatile u32 int_reg; ENTER; - ipr_cmd->job_step = ipr_ioafp_indentify_hrrq; + ipr_cmd->job_step = ipr_ioafp_identify_hrrq; ipr_init_ioa_mem(ioa_cfg); ioa_cfg->allow_interrupts = 1; @@ -7028,19 +7137,27 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), - ioa_cfg->regs.clr_interrupt_mask_reg); + ioa_cfg->regs.clr_interrupt_mask_reg32); int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); return IPR_RC_JOB_CONTINUE; } /* Enable destructive diagnostics on IOA */ - writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg); + writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32); + + writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32); + if (ioa_cfg->sis64) + writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg); - writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg); int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); + if (ioa_cfg->sis64) { + ipr_cmd->job_step = ipr_reset_next_stage; + return IPR_RC_JOB_CONTINUE; + } + ipr_cmd->timer.data = (unsigned long) ipr_cmd; ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; @@ -7374,7 +7491,7 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd) if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) { ipr_mask_and_clear_interrupts(ioa_cfg, ~0); - writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg); + writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32); ipr_cmd->job_step = ipr_reset_wait_to_start_bist; } else { ipr_cmd->job_step = ioa_cfg->reset; @@ -8104,15 +8221,23 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; + t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; + t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32; t->clr_interrupt_reg = base + p->clr_interrupt_reg; + t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32; t->sense_interrupt_reg = base + p->sense_interrupt_reg; + t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32; t->ioarrin_reg = base + p->ioarrin_reg; t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; + t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32; t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; + t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32; t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; + t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32; if (ioa_cfg->sis64) { + t->init_feedback_reg = base + p->init_feedback_reg; t->dump_addr_reg = base + p->dump_addr_reg; t->dump_data_reg = base + p->dump_data_reg; } @@ -8187,7 +8312,7 @@ static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, init_waitqueue_head(&ioa_cfg->msi_wait_q); ioa_cfg->msi_received = 0; ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); - writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg); + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, 
ioa_cfg->regs.clr_interrupt_mask_reg32); int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); @@ -8198,7 +8323,7 @@ static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, } else if (ipr_debug) dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq); - writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg); + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32); int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ); ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); @@ -8378,9 +8503,9 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, * If HRRQ updated interrupt is not masked, or reset alert is set, * the card is in an unknown state and needs a hard reset */ - mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg); - interrupts = readl(ioa_cfg->regs.sense_interrupt_reg); - uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg); + mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32); + interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32); + uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT)) ioa_cfg->needs_hard_reset = 1; if (interrupts & IPR_PCII_ERROR_INTERRUPTS) diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 36736b512846..137217e592fe 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -232,6 +232,13 @@ #define IPR_DOORBELL 0x82800000 #define IPR_RUNTIME_RESET 0x40000000 +#define IPR_IPL_INIT_MIN_STAGE_TIME 5 +#define IPR_IPL_INIT_STAGE_UNKNOWN 0x0 +#define IPR_IPL_INIT_STAGE_TRANSOP 0xB0000000 +#define IPR_IPL_INIT_STAGE_MASK 0xff000000 +#define IPR_IPL_INIT_STAGE_TIME_MASK 0x0000ffff +#define IPR_PCII_IPL_STAGE_CHANGE (0x80000000 >> 0) + #define IPR_PCII_IOA_TRANS_TO_OPER (0x80000000 >> 0) #define IPR_PCII_IOARCB_XFER_FAILED (0x80000000 >> 3) #define IPR_PCII_IOA_UNIT_CHECKED (0x80000000 >> 4) @@ -1196,14 +1203,23 @@ struct ipr_misc_cbs { struct ipr_interrupt_offsets { unsigned long set_interrupt_mask_reg; unsigned long clr_interrupt_mask_reg; + unsigned long clr_interrupt_mask_reg32; unsigned long sense_interrupt_mask_reg; + unsigned long sense_interrupt_mask_reg32; unsigned long clr_interrupt_reg; + unsigned long clr_interrupt_reg32; unsigned long sense_interrupt_reg; + unsigned long sense_interrupt_reg32; unsigned long ioarrin_reg; unsigned long sense_uproc_interrupt_reg; + unsigned long sense_uproc_interrupt_reg32; unsigned long set_uproc_interrupt_reg; + unsigned long set_uproc_interrupt_reg32; unsigned long clr_uproc_interrupt_reg; + unsigned long clr_uproc_interrupt_reg32; + + unsigned long init_feedback_reg; unsigned long dump_addr_reg; unsigned long dump_data_reg; @@ -1212,14 +1228,23 @@ struct ipr_interrupt_offsets { struct ipr_interrupts { void __iomem *set_interrupt_mask_reg; void __iomem *clr_interrupt_mask_reg; + void __iomem *clr_interrupt_mask_reg32; void __iomem *sense_interrupt_mask_reg; + void __iomem *sense_interrupt_mask_reg32; void __iomem *clr_interrupt_reg; + void __iomem *clr_interrupt_reg32; void __iomem *sense_interrupt_reg; + void __iomem *sense_interrupt_reg32; void __iomem *ioarrin_reg; void __iomem *sense_uproc_interrupt_reg; + void __iomem *sense_uproc_interrupt_reg32; void __iomem *set_uproc_interrupt_reg; + void __iomem *set_uproc_interrupt_reg32; void __iomem *clr_uproc_interrupt_reg; + void __iomem *clr_uproc_interrupt_reg32; + + void __iomem 
*init_feedback_reg; void __iomem *dump_addr_reg; void __iomem *dump_data_reg; -- cgit v1.2.3 From 5aa3a333eaae1016f5a72f9e0e2dce39c08762f8 Mon Sep 17 00:00:00 2001 From: Wayne Boyer Date: Fri, 19 Feb 2010 13:24:32 -0800 Subject: [SCSI] ipr: add support for new IOASCs This patch adds support for new errors that can be received from adapters using the next generation 64 bit IOA PCI interface chip. Signed-off-by: Wayne Boyer Signed-off-by: James Bottomley --- drivers/scsi/ipr.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 10b162d607ac..7dc9fb1e14e3 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -222,6 +222,20 @@ struct ipr_error_table_t ipr_error_table[] = { "FFFE: Soft device bus error recovered by the IOA"}, {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL, "4101: Soft device bus fabric error"}, + {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL, + "FFFC: Logical block guard error recovered by the device"}, + {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL, + "FFFC: Logical block reference tag error recovered by the device"}, + {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL, + "4171: Recovered scatter list tag / sequence number error"}, + {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL, + "FF3D: Recovered logical block CRC error on IOA to Host transfer"}, + {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL, + "4171: Recovered logical block sequence number error on IOA to Host transfer"}, + {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL, + "FFFD: Recovered logical block reference tag error detected by the IOA"}, + {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL, + "FFFD: Logical block guard error recovered by the IOA"}, {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL, "FFF9: Device sector reassign successful"}, {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL, @@ -278,12 +292,28 @@ struct ipr_error_table_t ipr_error_table[] = { "3120: SCSI bus is not operational"}, {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL, "4100: Hard device bus fabric error"}, + {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL, + "310C: Logical block guard error detected by the device"}, + {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL, + "310C: Logical block reference tag error detected by the device"}, + {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL, + "4170: Scatter list tag / sequence number error"}, + {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL, + "8150: Logical block CRC error on IOA to Host transfer"}, + {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL, + "4170: Logical block sequence number error on IOA to Host transfer"}, + {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL, + "310D: Logical block reference tag error detected by the IOA"}, + {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL, + "310D: Logical block guard error detected by the IOA"}, {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL, "9000: IOA reserved area data check"}, {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL, "9001: IOA reserved area invalid data pattern"}, {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL, "9002: IOA reserved area LRC error"}, + {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL, + "Hardware Error, IOA metadata access error"}, {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL, "102E: Out of alternate sectors for disk storage"}, {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL, @@ -348,6 +378,8 @@ struct ipr_error_table_t ipr_error_table[] = { "Illegal request, commands not allowed to this device"}, {0x05258100, 0, 0, "Illegal request, command not allowed to a secondary adapter"}, + {0x05258200, 0, 0, + "Illegal request, command not allowed to a non-optimized resource"}, {0x05260000, 0, 0, "Illegal request, invalid field in parameter list"}, {0x05260100, 0, 0, -- cgit 
v1.2.3 From d7b4627f5f3390a2f350f16c047b3fc3eccce6d8 Mon Sep 17 00:00:00 2001 From: Wayne Boyer Date: Fri, 19 Feb 2010 13:24:38 -0800 Subject: [SCSI] ipr: adds PCI ID definitions for new adapters This patch adds the PCI ID definitions for new adapters based on the next generation 64 bit IOA PCI interface chip. New entries have been added to the ipr_pci_table[] array for the adapters and to the ipr_chip[] array for the new versions of the chip. Older entries have been removed for cards that did not ship. Signed-off-by: Wayne Boyer Signed-off-by: James Bottomley --- drivers/scsi/ipr.c | 26 +++++++++++++++++++------- drivers/scsi/ipr.h | 21 ++++++++++++++++----- 2 files changed, 35 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 7dc9fb1e14e3..c79cd98eb6bf 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -178,7 +178,9 @@ static const struct ipr_chip_t ipr_chip[] = { { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }, - { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] } + { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] } }; static int ipr_max_bus_speeds [] = { @@ -8824,9 +8826,6 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = { { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0, IPR_USE_LONG_TRANSOP_TIMEOUT }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, - IPR_USE_LONG_TRANSOP_TIMEOUT }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, @@ -8842,9 +8841,22 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = { { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0, IPR_USE_LONG_TRANSOP_TIMEOUT }, - { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E, - PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, - IPR_USE_LONG_TRANSOP_TIMEOUT }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 }, { } }; MODULE_DEVICE_TABLE(pci, ipr_pci_table); diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 137217e592fe..4c267b5e0b96 100644 --- a/drivers/scsi/ipr.h +++ 
b/drivers/scsi/ipr.h @@ -37,8 +37,8 @@ /* * Literals */ -#define IPR_DRIVER_VERSION "2.4.3" -#define IPR_DRIVER_DATE "(June 10, 2009)" +#define IPR_DRIVER_VERSION "2.5.0" +#define IPR_DRIVER_DATE "(February 11, 2010)" /* * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding @@ -55,7 +55,9 @@ #define IPR_NUM_BASE_CMD_BLKS 100 #define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339 -#define PCI_DEVICE_ID_IBM_SCAMP_E 0x034A + +#define PCI_DEVICE_ID_IBM_CROC_FPGA_E2 0x033D +#define PCI_DEVICE_ID_IBM_CROC_ASIC_E2 0x034A #define IPR_SUBS_DEV_ID_2780 0x0264 #define IPR_SUBS_DEV_ID_5702 0x0266 @@ -70,15 +72,24 @@ #define IPR_SUBS_DEV_ID_572A 0x02C1 #define IPR_SUBS_DEV_ID_572B 0x02C2 #define IPR_SUBS_DEV_ID_572F 0x02C3 -#define IPR_SUBS_DEV_ID_574D 0x030B #define IPR_SUBS_DEV_ID_574E 0x030A #define IPR_SUBS_DEV_ID_575B 0x030D #define IPR_SUBS_DEV_ID_575C 0x0338 -#define IPR_SUBS_DEV_ID_575D 0x033E #define IPR_SUBS_DEV_ID_57B3 0x033A #define IPR_SUBS_DEV_ID_57B7 0x0360 #define IPR_SUBS_DEV_ID_57B8 0x02C2 +#define IPR_SUBS_DEV_ID_57B4 0x033B +#define IPR_SUBS_DEV_ID_57B2 0x035F +#define IPR_SUBS_DEV_ID_57C6 0x0357 + +#define IPR_SUBS_DEV_ID_57B5 0x033C +#define IPR_SUBS_DEV_ID_57CE 0x035E +#define IPR_SUBS_DEV_ID_57B1 0x0355 + +#define IPR_SUBS_DEV_ID_574D 0x0356 +#define IPR_SUBS_DEV_ID_575D 0x035D + #define IPR_NAME "ipr" /* -- cgit v1.2.3 From 309ce156aa27f29338438011d292a8d6496623d3 Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Sat, 20 Feb 2010 08:02:10 +0530 Subject: [SCSI] libiscsi: Make iscsi_eh_target_reset start with session reset The iscsi_eh_target_reset has been modified to attempt target reset only. If it fails, then iscsi_eh_session_reset will be called. Signed-off-by: Mike Christie Signed-off-by: Jayamohan Kallickal Signed-off-by: James Bottomley --- drivers/infiniband/ulp/iser/iscsi_iser.c | 2 +- drivers/scsi/be2iscsi/be_main.c | 2 +- drivers/scsi/bnx2i/bnx2i_iscsi.c | 2 +- drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 2 +- drivers/scsi/iscsi_tcp.c | 2 +- drivers/scsi/libiscsi.c | 23 +++++++++++++++++++---- include/scsi/libiscsi.h | 3 ++- 7 files changed, 26 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 5f7a6fca0a4d..5472b7e9abdc 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -596,7 +596,7 @@ static struct scsi_host_template iscsi_iser_sht = { .cmd_per_lun = ISER_DEF_CMD_PER_LUN, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler= iscsi_eh_device_reset, - .eh_target_reset_handler= iscsi_eh_target_reset, + .eh_target_reset_handler = iscsi_eh_recover_target, .target_alloc = iscsi_target_alloc, .use_clustering = DISABLE_CLUSTERING, .proc_name = "iscsi_iser", diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 7c22616ab141..4d269b434a78 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -79,7 +79,7 @@ static struct scsi_host_template beiscsi_sht = { .slave_configure = beiscsi_slave_configure, .target_alloc = iscsi_target_alloc, .eh_device_reset_handler = iscsi_eh_device_reset, - .eh_target_reset_handler = iscsi_eh_target_reset, + .eh_target_reset_handler = iscsi_eh_session_reset, .sg_tablesize = BEISCSI_SGLIST_ELEMENTS, .can_queue = BE2_IO_DEPTH, .this_id = -1, diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 1c4d1215769d..cb71dc984797 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ 
-1989,7 +1989,7 @@ static struct scsi_host_template bnx2i_host_template = { .queuecommand = iscsi_queuecommand, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler = iscsi_eh_device_reset, - .eh_target_reset_handler = iscsi_eh_target_reset, + .eh_target_reset_handler = iscsi_eh_recover_target, .change_queue_depth = iscsi_change_queue_depth, .can_queue = 1024, .max_sectors = 127, diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c index 412853c65372..b7c30585dadd 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c +++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c @@ -915,7 +915,7 @@ static struct scsi_host_template cxgb3i_host_template = { .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler = iscsi_eh_device_reset, - .eh_target_reset_handler = iscsi_eh_target_reset, + .eh_target_reset_handler = iscsi_eh_recover_target, .target_alloc = iscsi_target_alloc, .use_clustering = DISABLE_CLUSTERING, .this_id = -1, diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 8a89ba900588..249053a9d4fa 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -874,7 +874,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = { .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, .eh_abort_handler = iscsi_eh_abort, .eh_device_reset_handler= iscsi_eh_device_reset, - .eh_target_reset_handler= iscsi_eh_target_reset, + .eh_target_reset_handler = iscsi_eh_recover_target, .use_clustering = DISABLE_CLUSTERING, .slave_alloc = iscsi_sw_tcp_slave_alloc, .slave_configure = iscsi_sw_tcp_slave_configure, diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 703eb6a88790..685eaec53218 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -2338,7 +2338,7 @@ EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); * This function will wait for a relogin, session termination from * userspace, or a recovery/replacement timeout. */ -static int iscsi_eh_session_reset(struct scsi_cmnd *sc) +int iscsi_eh_session_reset(struct scsi_cmnd *sc) { struct iscsi_cls_session *cls_session; struct iscsi_session *session; @@ -2389,6 +2389,7 @@ failed: mutex_unlock(&session->eh_mutex); return SUCCESS; } +EXPORT_SYMBOL_GPL(iscsi_eh_session_reset); static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) { @@ -2403,8 +2404,7 @@ static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr) * iscsi_eh_target_reset - reset target * @sc: scsi command * - * This will attempt to send a warm target reset. If that fails - * then we will drop the session and attempt ERL0 recovery. + * This will attempt to send a warm target reset. */ int iscsi_eh_target_reset(struct scsi_cmnd *sc) { @@ -2476,12 +2476,27 @@ done: ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname, rc == SUCCESS ? "SUCCESS" : "FAILED"); mutex_unlock(&session->eh_mutex); + return rc; +} +EXPORT_SYMBOL_GPL(iscsi_eh_target_reset); +/** + * iscsi_eh_recover_target - reset target and possibly the session + * @sc: scsi command + * + * This will attempt to send a warm target reset. If that fails, + * we will escalate to ERL0 session recovery. + */ +int iscsi_eh_recover_target(struct scsi_cmnd *sc) +{ + int rc; + + rc = iscsi_eh_target_reset(sc); if (rc == FAILED) rc = iscsi_eh_session_reset(sc); return rc; } -EXPORT_SYMBOL_GPL(iscsi_eh_target_reset); +EXPORT_SYMBOL_GPL(iscsi_eh_recover_target); /* * Pre-allocate a pool of @max items of @item_size. 
By default, the pool diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index ff92b46f5153..ae5196aae1a5 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -338,7 +338,8 @@ struct iscsi_host { extern int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason); extern int iscsi_eh_abort(struct scsi_cmnd *sc); -extern int iscsi_eh_target_reset(struct scsi_cmnd *sc); +extern int iscsi_eh_recover_target(struct scsi_cmnd *sc); +extern int iscsi_eh_session_reset(struct scsi_cmnd *sc); extern int iscsi_eh_device_reset(struct scsi_cmnd *sc); extern int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)); -- cgit v1.2.3 From 4183122dbc7c489f11971c5afa8e42011bca7fa2 Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Sat, 20 Feb 2010 08:02:39 +0530 Subject: [SCSI] be2iscsi: Cleanup of resets for device and target This patch cleans up device and target reset handling for the driver Signed-off-by: Mike Christie Signed-off-by: Jayamohan Kallickal Signed-off-by: James Bottomley --- drivers/scsi/be2iscsi/be_main.c | 134 +++++++++++++++++++++++++++++++++++++--- drivers/scsi/be2iscsi/be_main.h | 7 +++ drivers/scsi/be2iscsi/be_mgmt.c | 14 +++-- drivers/scsi/be2iscsi/be_mgmt.h | 8 +-- 4 files changed, 145 insertions(+), 18 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 4d269b434a78..eab31a515de3 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -58,6 +58,123 @@ static int beiscsi_slave_configure(struct scsi_device *sdev) return 0; } +static int beiscsi_eh_abort(struct scsi_cmnd *sc) +{ + struct iscsi_cls_session *cls_session; + struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr; + struct beiscsi_io_task *aborted_io_task; + struct iscsi_conn *conn; + struct beiscsi_conn *beiscsi_conn; + struct beiscsi_hba *phba; + struct iscsi_session *session; + struct invalidate_command_table *inv_tbl; + unsigned int cid, tag, num_invalidate; + + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; + + spin_lock_bh(&session->lock); + if (!aborted_task || !aborted_task->sc) { + /* we raced */ + spin_unlock_bh(&session->lock); + return SUCCESS; + } + + aborted_io_task = aborted_task->dd_data; + if (!aborted_io_task->scsi_cmnd) { + /* raced or invalid command */ + spin_unlock_bh(&session->lock); + return SUCCESS; + } + spin_unlock_bh(&session->lock); + conn = aborted_task->conn; + beiscsi_conn = conn->dd_data; + phba = beiscsi_conn->phba; + + /* invalidate iocb */ + cid = beiscsi_conn->beiscsi_conn_cid; + inv_tbl = phba->inv_tbl; + memset(inv_tbl, 0x0, sizeof(*inv_tbl)); + inv_tbl->cid = cid; + inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index; + num_invalidate = 1; + tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid); + if (!tag) { + shost_printk(KERN_WARNING, phba->shost, + "mgmt_invalidate_icds could not be" + " submitted\n"); + return FAILED; + } else { + wait_event_interruptible(phba->ctrl.mcc_wait[tag], + phba->ctrl.mcc_numtag[tag]); + free_mcc_tag(&phba->ctrl, tag); + } + + return iscsi_eh_abort(sc); +} + +static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) +{ + struct iscsi_task *abrt_task; + struct beiscsi_io_task *abrt_io_task; + struct iscsi_conn *conn; + struct beiscsi_conn *beiscsi_conn; + struct beiscsi_hba *phba; + struct iscsi_session *session; + struct iscsi_cls_session *cls_session; + struct invalidate_command_table *inv_tbl; + unsigned int cid, tag, i, num_invalidate; + 
int rc = FAILED; + + /* invalidate iocbs */ + cls_session = starget_to_session(scsi_target(sc->device)); + session = cls_session->dd_data; + spin_lock_bh(&session->lock); + if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) + goto unlock; + + conn = session->leadconn; + beiscsi_conn = conn->dd_data; + phba = beiscsi_conn->phba; + cid = beiscsi_conn->beiscsi_conn_cid; + inv_tbl = phba->inv_tbl; + memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN); + num_invalidate = 0; + for (i = 0; i < conn->session->cmds_max; i++) { + abrt_task = conn->session->cmds[i]; + abrt_io_task = abrt_task->dd_data; + if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) + continue; + + if (abrt_task->sc->device->lun != abrt_task->sc->device->lun) + continue; + + inv_tbl->cid = cid; + inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index; + num_invalidate++; + inv_tbl++; + } + spin_unlock_bh(&session->lock); + inv_tbl = phba->inv_tbl; + + tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid); + if (!tag) { + shost_printk(KERN_WARNING, phba->shost, + "mgmt_invalidate_icds could not be" + " submitted\n"); + return FAILED; + } else { + wait_event_interruptible(phba->ctrl.mcc_wait[tag], + phba->ctrl.mcc_numtag[tag]); + free_mcc_tag(&phba->ctrl, tag); + } + + return iscsi_eh_device_reset(sc); +unlock: + spin_unlock_bh(&session->lock); + return rc; +} + /*------------------- PCI Driver operations and data ----------------- */ static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, @@ -74,11 +191,11 @@ static struct scsi_host_template beiscsi_sht = { .name = "ServerEngines 10Gbe open-iscsi Initiator Driver", .proc_name = DRV_NAME, .queuecommand = iscsi_queuecommand, - .eh_abort_handler = iscsi_eh_abort, .change_queue_depth = iscsi_change_queue_depth, .slave_configure = beiscsi_slave_configure, .target_alloc = iscsi_target_alloc, - .eh_device_reset_handler = iscsi_eh_device_reset, + .eh_abort_handler = beiscsi_eh_abort, + .eh_device_reset_handler = beiscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_session_reset, .sg_tablesize = BEISCSI_SGLIST_ELEMENTS, .can_queue = BE2_IO_DEPTH, @@ -3486,9 +3603,9 @@ static int beiscsi_mtask(struct iscsi_task *task) struct hwi_wrb_context *pwrb_context; struct wrb_handle *pwrb_handle; unsigned int doorbell = 0; - unsigned int i, cid; + struct invalidate_command_table *inv_tbl; struct iscsi_task *aborted_task; - unsigned int tag; + unsigned int i, cid, tag, num_invalidate; cid = beiscsi_conn->beiscsi_conn_cid; pwrb = io_task->pwrb_handle->pwrb; @@ -3538,9 +3655,12 @@ static int beiscsi_mtask(struct iscsi_task *task) if (!aborted_io_task->scsi_cmnd) return 0; - tag = mgmt_invalidate_icds(phba, - aborted_io_task->psgl_handle->sgl_index, - cid); + inv_tbl = phba->inv_tbl; + memset(inv_tbl, 0x0, sizeof(*inv_tbl)); + inv_tbl->cid = cid; + inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index; + num_invalidate = 1; + tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid); if (!tag) { shost_printk(KERN_WARNING, phba->shost, "mgmt_invalidate_icds could not be" diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index c53a80ab796c..d4d31dc09088 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -257,6 +257,11 @@ struct hba_parameters { unsigned int num_sge; }; +struct invalidate_command_table { + unsigned short icd; + unsigned short cid; +} __packed; + struct beiscsi_hba { struct hba_parameters params; struct hwi_controller *phwi_ctrlr; @@ 
-329,6 +334,8 @@ struct beiscsi_hba { struct work_struct work_cqs; /* The work being queued */ struct be_ctrl_info ctrl; unsigned int generation; + struct invalidate_command_table inv_tbl[128]; + }; struct beiscsi_session { diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 317bcd042ced..72617b650a7e 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -145,14 +145,15 @@ unsigned char mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) } unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, - unsigned int icd, unsigned int cid) + struct invalidate_command_table *inv_tbl, + unsigned int num_invalidate, unsigned int cid) { struct be_dma_mem nonemb_cmd; struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; struct be_sge *sge; struct invalidate_commands_params_in *req; - unsigned int tag = 0; + unsigned int i, tag = 0; spin_lock(&ctrl->mbox_lock); tag = alloc_mcc_tag(phba); @@ -183,9 +184,12 @@ unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, sizeof(*req)); req->ref_handle = 0; req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE; - req->icd_count = 0; - req->table[req->icd_count].icd = icd; - req->table[req->icd_count].cid = cid; + for (i = 0; i < num_invalidate; i++) { + req->table[i].icd = inv_tbl->icd; + req->table[i].cid = inv_tbl->cid; + req->icd_count++; + inv_tbl++; + } sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma)); sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF); sge->len = cpu_to_le32(nonemb_cmd.size); diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index ecead6a5aa56..3d316b82feb1 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -94,7 +94,8 @@ unsigned char mgmt_upload_connection(struct beiscsi_hba *phba, unsigned short cid, unsigned int upload_flag); unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba, - unsigned int icd, unsigned int cid); + struct invalidate_command_table *inv_tbl, + unsigned int num_invalidate, unsigned int cid); struct iscsi_invalidate_connection_params_in { struct be_cmd_req_hdr hdr; @@ -116,11 +117,6 @@ union iscsi_invalidate_connection_params { struct iscsi_invalidate_connection_params_out response; } __packed; -struct invalidate_command_table { - unsigned short icd; - unsigned short cid; -} __packed; - struct invalidate_commands_params_in { struct be_cmd_req_hdr hdr; unsigned int ref_handle; -- cgit v1.2.3 From 944b2fbce26ce39555363fd092386807fa5ea08c Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Sat, 20 Feb 2010 08:03:24 +0530 Subject: [SCSI] be2iscsi: Fix for a possible udelay while holding lock This patch fixes a situation where we could call udelay while holding spin_lock Signed-off-by: Jayamohan Kallickal Signed-off-by: James Bottomley --- drivers/scsi/be2iscsi/be_cmds.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index 67098578fba4..cda6642c7368 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -32,18 +32,11 @@ void be_mcc_notify(struct beiscsi_hba *phba) unsigned int alloc_mcc_tag(struct beiscsi_hba *phba) { unsigned int tag = 0; - unsigned int num = 0; -mcc_tag_rdy: if (phba->ctrl.mcc_tag_available) { tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index]; phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0; phba->ctrl.mcc_numtag[tag] = 0; - } else { - udelay(100); - num++; - if (num < mcc_timeout) - goto mcc_tag_rdy; } if (tag) { 
phba->ctrl.mcc_tag_available--; -- cgit v1.2.3 From dafab8e079f432268cca5cf378b92d6acfacc393 Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Sat, 20 Feb 2010 08:03:56 +0530 Subject: [SCSI] be2iscsi: cleans up abort handling This patch cleans up abort handling when TMF is sent Signed-off-by: Jayamohan Kallickal Signed-off-by: James Bottomley --- drivers/scsi/be2iscsi/be_main.c | 62 ++++++++--------------------------------- 1 file changed, 11 insertions(+), 51 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index eab31a515de3..aee3734d8617 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -1063,14 +1063,18 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, case HWH_TYPE_IO: case HWH_TYPE_IO_RD: if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == - ISCSI_OP_NOOP_OUT) { + ISCSI_OP_NOOP_OUT) be_complete_nopin_resp(beiscsi_conn, task, psol); - } else + else be_complete_io(beiscsi_conn, task, psol); break; case HWH_TYPE_LOGOUT: - be_complete_logout(beiscsi_conn, task, psol); + if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) + be_complete_logout(beiscsi_conn, task, psol); + else + be_complete_tmf(beiscsi_conn, task, psol); + break; case HWH_TYPE_LOGIN: @@ -1079,10 +1083,6 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, "- Solicited path \n"); break; - case HWH_TYPE_TMF: - be_complete_tmf(beiscsi_conn, task, psol); - break; - case HWH_TYPE_NOP: be_complete_nopin_resp(beiscsi_conn, task, psol); break; @@ -3593,19 +3593,13 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, static int beiscsi_mtask(struct iscsi_task *task) { - struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data; + struct beiscsi_io_task *io_task = task->dd_data; struct iscsi_conn *conn = task->conn; struct beiscsi_conn *beiscsi_conn = conn->dd_data; struct beiscsi_hba *phba = beiscsi_conn->phba; - struct iscsi_session *session; struct iscsi_wrb *pwrb = NULL; - struct hwi_controller *phwi_ctrlr; - struct hwi_wrb_context *pwrb_context; - struct wrb_handle *pwrb_handle; unsigned int doorbell = 0; - struct invalidate_command_table *inv_tbl; - struct iscsi_task *aborted_task; - unsigned int i, cid, tag, num_invalidate; + unsigned int cid; cid = beiscsi_conn->beiscsi_conn_cid; pwrb = io_task->pwrb_handle->pwrb; @@ -3616,6 +3610,7 @@ static int beiscsi_mtask(struct iscsi_task *task) io_task->pwrb_handle->wrb_index); AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, io_task->psgl_handle->sgl_index); + switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { case ISCSI_OP_LOGIN: AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, @@ -3640,36 +3635,6 @@ static int beiscsi_mtask(struct iscsi_task *task) hwi_write_buffer(pwrb, task); break; case ISCSI_OP_SCSI_TMFUNC: - session = conn->session; - i = ((struct iscsi_tm *)task->hdr)->rtt; - phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[cid - - phba->fw_config.iscsi_cid_start]; - pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i) - >> 16]; - aborted_task = pwrb_handle->pio_handle; - if (!aborted_task) - return 0; - - aborted_io_task = aborted_task->dd_data; - if (!aborted_io_task->scsi_cmnd) - return 0; - - inv_tbl = phba->inv_tbl; - memset(inv_tbl, 0x0, sizeof(*inv_tbl)); - inv_tbl->cid = cid; - inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index; - num_invalidate = 1; - tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid); - if (!tag) { - shost_printk(KERN_WARNING, phba->shost, - 
"mgmt_invalidate_icds could not be" - " submitted\n"); - } else { - wait_event_interruptible(phba->ctrl.mcc_wait[tag], - phba->ctrl.mcc_numtag[tag]); - free_mcc_tag(&phba->ctrl, tag); - } AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_TMF_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); @@ -3678,7 +3643,7 @@ static int beiscsi_mtask(struct iscsi_task *task) case ISCSI_OP_LOGOUT: AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, - HWH_TYPE_LOGOUT); + HWH_TYPE_LOGOUT); hwi_write_buffer(pwrb, task); break; @@ -3704,17 +3669,12 @@ static int beiscsi_mtask(struct iscsi_task *task) static int beiscsi_task_xmit(struct iscsi_task *task) { - struct iscsi_conn *conn = task->conn; struct beiscsi_io_task *io_task = task->dd_data; struct scsi_cmnd *sc = task->sc; - struct beiscsi_conn *beiscsi_conn = conn->dd_data; struct scatterlist *sg; int num_sg; unsigned int writedir = 0, xferlen = 0; - SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t" - "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid, - task, conn, beiscsi_conn); if (!sc) return beiscsi_mtask(task); -- cgit v1.2.3 From 90a289e87648f80b63178c463aa7daaf5205805c Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Sat, 20 Feb 2010 08:04:28 +0530 Subject: [SCSI] be2iscsi: Remove debug code This patch removes some debug lines which are unnecessary and also aligns some lines in code Signed-off-by: Jayamohan Kallickal Signed-off-by: James Bottomley --- drivers/scsi/be2iscsi/be_iscsi.c | 4 ++-- drivers/scsi/be2iscsi/be_main.c | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 29a3aaf35f9f..c3928cb8b042 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -482,7 +482,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep); if (!tag) { SE_DEBUG(DBG_LVL_1, - "mgmt_invalidate_connection Failed for cid=%d \n", + "mgmt_open_connection Failed for cid=%d \n", beiscsi_ep->ep_cid); } else { wait_event_interruptible(phba->ctrl.mcc_wait[tag], @@ -701,7 +701,7 @@ void beiscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) if (!tag) { SE_DEBUG(DBG_LVL_1, "mgmt_invalidate_connection Failed for cid=%d \n", - beiscsi_ep->ep_cid); + beiscsi_ep->ep_cid); } else { wait_event_interruptible(phba->ctrl.mcc_wait[tag], phba->ctrl.mcc_numtag[tag]); diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index aee3734d8617..b5dca45bae96 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -3779,7 +3779,6 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, " Failed in beiscsi_hba_alloc \n"); goto disable_pci; } - SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba); switch (pcidev->device) { case BE_DEVICE_ID1: -- cgit v1.2.3 From ed58ea2ab58c7d80a07a829a1cc2c4161c300494 Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Sat, 20 Feb 2010 08:05:07 +0530 Subject: [SCSI] be2iscsi: Fixing memory allocation for connection This patch fixes some situations where enough resources were not avaialable when targets exceeded a certain limit Signed-off-by: Jayamohan Kallickal Signed-off-by: James Bottomley --- drivers/scsi/be2iscsi/be_main.c | 4 ++-- drivers/scsi/be2iscsi/be_main.h | 4 +--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 
b5dca45bae96..5887d7a0e3ff 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -359,7 +359,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba) + BE2_TMFS + BE2_NOPOUT_REQ)); phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; - phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;; + phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2; phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;; phba->params.num_sge_per_io = BE2_SGE; phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; @@ -2169,7 +2169,7 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba) num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / ((sizeof(struct iscsi_wrb) * phba->params.wrbs_per_cxn)); - for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) { + for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { pwrb_context = &phwi_ctrlr->wrb_context[index]; if (num_cxn_wrb) { for (j = 0; j < phba->params.wrbs_per_cxn; j++) { diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index d4d31dc09088..87ec21280a37 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -498,8 +498,6 @@ struct hwi_async_entry { struct list_head data_busy_list; }; -#define BE_MIN_ASYNC_ENTRIES 128 - struct hwi_async_pdu_context { struct { struct be_bus_address pa_base; @@ -540,7 +538,7 @@ struct hwi_async_pdu_context { * This is a varying size list! Do not add anything * after this entry!! */ - struct hwi_async_entry async_entry[BE_MIN_ASYNC_ENTRIES]; + struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2]; }; #define PDUCQE_CODE_MASK 0x0000003F -- cgit v1.2.3 From c03af1ae1cce97a5530b907ea03625ce6e00214e Mon Sep 17 00:00:00 2001 From: Jayamohan Kallickal Date: Sat, 20 Feb 2010 08:05:43 +0530 Subject: [SCSI] be2iscsi: Alloc only one EQ if intr mode This patch ensures that we alloc only one EQ if we are if we are not in msix mode Signed-off-by: Jayamohan Kallickal Signed-off-by: James Bottomley --- drivers/scsi/be2iscsi/be_main.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 5887d7a0e3ff..fcfb29e02d8a 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -3190,14 +3190,18 @@ static unsigned char hwi_enable_intr(struct beiscsi_hba *phba) reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr); iowrite32(reg, addr); - for (i = 0; i <= phba->num_cpus; i++) { - eq = &phwi_context->be_eq[i].q; + if (!phba->msix_enabled) { + eq = &phwi_context->be_eq[0].q; SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id); hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); + } else { + for (i = 0; i <= phba->num_cpus; i++) { + eq = &phwi_context->be_eq[i].q; + SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id); + hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); + } } - } else - shost_printk(KERN_WARNING, phba->shost, - "In hwi_enable_intr, Not Enabled \n"); + } return true; } -- cgit v1.2.3 From 64355b929dec0cb6271e4ac7834c9cf262961e40 Mon Sep 17 00:00:00 2001 From: Brian King Date: Sun, 21 Feb 2010 10:37:57 -0600 Subject: [SCSI] ibmvscsi: Add suspend/resume support Adds support for resuming from suspend for IBM VSCSI devices. We may have lost an interrupt over the suspend, so we just kick the interrupt handler to process anything that is outstanding. We expect to find a transport event indicating we need to reestablish our CRQ. 
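The resume path is deliberately minimal: because an interrupt may have been lost while the partition was suspended, the handler only masks the VIO interrupt and schedules the driver's existing srp_task tasklet, and the normal completion path then finds the pending transport event and re-registers the CRQ. A condensed sketch of that pattern follows, using only helpers that appear in the patch below (the function name here is illustrative; the real RPA implementation is rpavscsi_resume, wired up through a dev_pm_ops .resume callback):

        /* Sketch only: kick interrupt processing after resume from suspend. */
        static int example_vscsi_resume(struct ibmvscsi_host_data *hostdata)
        {
                /* Mask the VIO interrupt so the tasklet runs undisturbed... */
                vio_disable_interrupts(to_vio_dev(hostdata->dev));
                /* ...and let the existing srp_task tasklet drain anything
                 * outstanding, including the expected transport event that
                 * tells us to reestablish the CRQ.
                 */
                tasklet_schedule(&hostdata->srp_task);
                return 0;
        }
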
Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvscsi.c | 19 +++++++++++++++++++ drivers/scsi/ibmvscsi/ibmvscsi.h | 1 + drivers/scsi/ibmvscsi/iseries_vscsi.c | 6 ++++++ drivers/scsi/ibmvscsi/rpa_vscsi.c | 13 +++++++++++++ 4 files changed, 39 insertions(+) diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index e3a18e0ef276..dc1bcbe3b176 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -71,6 +71,7 @@ #include #include #include +#include #include #include #include @@ -1990,6 +1991,19 @@ static int ibmvscsi_remove(struct vio_dev *vdev) return 0; } +/** + * ibmvscsi_resume: Resume from suspend + * @dev: device struct + * + * We may have lost an interrupt across suspend/resume, so kick the + * interrupt handler + */ +static int ibmvscsi_resume(struct device *dev) +{ + struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev); + return ibmvscsi_ops->resume(hostdata); +} + /** * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we * support. @@ -2000,6 +2014,10 @@ static struct vio_device_id ibmvscsi_device_table[] __devinitdata = { }; MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table); +static struct dev_pm_ops ibmvscsi_pm_ops = { + .resume = ibmvscsi_resume +}; + static struct vio_driver ibmvscsi_driver = { .id_table = ibmvscsi_device_table, .probe = ibmvscsi_probe, @@ -2008,6 +2026,7 @@ static struct vio_driver ibmvscsi_driver = { .driver = { .name = "ibmvscsi", .owner = THIS_MODULE, + .pm = &ibmvscsi_pm_ops, } }; diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h index 76425303def0..9cb7c6a773e1 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.h +++ b/drivers/scsi/ibmvscsi/ibmvscsi.h @@ -120,6 +120,7 @@ struct ibmvscsi_ops { struct ibmvscsi_host_data *hostdata); int (*send_crq)(struct ibmvscsi_host_data *hostdata, u64 word1, u64 word2); + int (*resume) (struct ibmvscsi_host_data *hostdata); }; extern struct ibmvscsi_ops iseriesvscsi_ops; diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c index 0775fdee5fa8..f4776451a754 100644 --- a/drivers/scsi/ibmvscsi/iseries_vscsi.c +++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c @@ -158,10 +158,16 @@ static int iseriesvscsi_send_crq(struct ibmvscsi_host_data *hostdata, 0); } +static int iseriesvscsi_resume(struct ibmvscsi_host_data *hostdata) +{ + return 0; +} + struct ibmvscsi_ops iseriesvscsi_ops = { .init_crq_queue = iseriesvscsi_init_crq_queue, .release_crq_queue = iseriesvscsi_release_crq_queue, .reset_crq_queue = iseriesvscsi_reset_crq_queue, .reenable_crq_queue = iseriesvscsi_reenable_crq_queue, .send_crq = iseriesvscsi_send_crq, + .resume = iseriesvscsi_resume, }; diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c index 462a8574dad9..63a30cbbf9de 100644 --- a/drivers/scsi/ibmvscsi/rpa_vscsi.c +++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c @@ -334,10 +334,23 @@ static int rpavscsi_reenable_crq_queue(struct crq_queue *queue, return rc; } +/** + * rpavscsi_resume: - resume after suspend + * @hostdata: ibmvscsi_host_data of host + * + */ +static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata) +{ + vio_disable_interrupts(to_vio_dev(hostdata->dev)); + tasklet_schedule(&hostdata->srp_task); + return 0; +} + struct ibmvscsi_ops rpavscsi_ops = { .init_crq_queue = rpavscsi_init_crq_queue, .release_crq_queue = rpavscsi_release_crq_queue, .reset_crq_queue = rpavscsi_reset_crq_queue, .reenable_crq_queue = rpavscsi_reenable_crq_queue, 
.send_crq = rpavscsi_send_crq, + .resume = rpavscsi_resume, }; -- cgit v1.2.3 From b0f4d4cf12d0eaa0bd766686bba843fc105b6a60 Mon Sep 17 00:00:00 2001 From: Brian King Date: Sun, 21 Feb 2010 10:37:58 -0600 Subject: [SCSI] ibmvfc: Add suspend/resume support Adds support for resuming from suspend for IBM VFC devices. We may have lost an interrupt over the suspend, so we just kick the interrupt handler to process anything that is outstanding. We expect to find a transport event indicating we need to reestablish our CRQ. Signed-off-by: Brian King Signed-off-by: James Bottomley --- drivers/scsi/ibmvscsi/ibmvfc.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 732f6d35b4a8..4e577e2fee38 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -4735,6 +4736,27 @@ static int ibmvfc_remove(struct vio_dev *vdev) return 0; } +/** + * ibmvfc_resume - Resume from suspend + * @dev: device struct + * + * We may have lost an interrupt across suspend/resume, so kick the + * interrupt handler + * + */ +static int ibmvfc_resume(struct device *dev) +{ + unsigned long flags; + struct ibmvfc_host *vhost = dev_get_drvdata(dev); + struct vio_dev *vdev = to_vio_dev(dev); + + spin_lock_irqsave(vhost->host->host_lock, flags); + vio_disable_interrupts(vdev); + tasklet_schedule(&vhost->tasklet); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + return 0; +} + /** * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver * @vdev: vio device struct @@ -4755,6 +4777,10 @@ static struct vio_device_id ibmvfc_device_table[] __devinitdata = { }; MODULE_DEVICE_TABLE(vio, ibmvfc_device_table); +static struct dev_pm_ops ibmvfc_pm_ops = { + .resume = ibmvfc_resume +}; + static struct vio_driver ibmvfc_driver = { .id_table = ibmvfc_device_table, .probe = ibmvfc_probe, @@ -4763,6 +4789,7 @@ static struct vio_driver ibmvfc_driver = { .driver = { .name = IBMVFC_NAME, .owner = THIS_MODULE, + .pm = &ibmvfc_pm_ops, } }; -- cgit v1.2.3 From 667e23d4e968f6826dc5d3e81238a7f1f343fb4f Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 25 Feb 2010 14:02:51 -0600 Subject: [SCSI] hpsa: allow modifying device queue depth. Signed-off-by: Stephen M. 
Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 03697ba94251..745c62444d25 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -134,6 +135,8 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, static void hpsa_scan_start(struct Scsi_Host *); static int hpsa_scan_finished(struct Scsi_Host *sh, unsigned long elapsed_time); +static int hpsa_change_queue_depth(struct scsi_device *sdev, + int qdepth, int reason); static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); static int hpsa_slave_alloc(struct scsi_device *sdev); @@ -182,6 +185,7 @@ static struct scsi_host_template hpsa_driver_template = { .queuecommand = hpsa_scsi_queue_command, .scan_start = hpsa_scan_start, .scan_finished = hpsa_scan_finished, + .change_queue_depth = hpsa_change_queue_depth, .this_id = -1, .sg_tablesize = MAXSGENTRIES, .use_clustering = ENABLE_CLUSTERING, @@ -2077,6 +2081,23 @@ static int hpsa_scan_finished(struct Scsi_Host *sh, return finished; } +static int hpsa_change_queue_depth(struct scsi_device *sdev, + int qdepth, int reason) +{ + struct ctlr_info *h = sdev_to_hba(sdev); + + if (reason != SCSI_QDEPTH_DEFAULT) + return -ENOTSUPP; + + if (qdepth < 1) + qdepth = 1; + else + if (qdepth > h->nr_cmds) + qdepth = h->nr_cmds; + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + return sdev->queue_depth; +} + static void hpsa_unregister_scsi(struct ctlr_info *h) { /* we are being forcibly unloaded, and may not refuse. */ -- cgit v1.2.3 From f0edafc6628f924a424ab4059df74f46f4f4241e Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 25 Feb 2010 14:02:56 -0600 Subject: [SCSI] hpsa: fix firmwart typo Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 745c62444d25..3734f31d08a8 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -2982,7 +2982,7 @@ static irqreturn_t do_hpsa_intr(int irq, void *dev_id) return IRQ_HANDLED; } -/* Send a message CDB to the firmwart. */ +/* Send a message CDB to the firmware. */ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, unsigned char type) { -- cgit v1.2.3 From 5512672f75611e9239669c6a4dce648b8d60fedd Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 25 Feb 2010 14:03:01 -0600 Subject: [SCSI] hpsa: fix scsi status mis-shift The SCSI status does not need to be shifted. Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 3734f31d08a8..604b4c95a440 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -1006,7 +1006,7 @@ static void complete_scsi_command(struct CommandList *cp, cmd->result = (DID_OK << 16); /* host byte */ cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ - cmd->result |= (ei->ScsiStatus << 1); + cmd->result |= ei->ScsiStatus; /* copy the sense data whether we need to or not. */ memcpy(cmd->sense_buffer, ei->SenseInfo, -- cgit v1.2.3 From e9ea04a65ad842452cbee92b5c865af7fed17f63 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 25 Feb 2010 14:03:06 -0600 Subject: [SCSI] hpsa: return -ENOMEM, not -1 Signed-off-by: Stephen M. 
Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 604b4c95a440..a72a18eabe24 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -1386,7 +1386,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr) if (c == NULL) { /* trouble... */ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); - return -1; + return -ENOMEM; } fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG); -- cgit v1.2.3 From 31468401ccf64322ca99fe05fbe64f1551240f57 Mon Sep 17 00:00:00 2001 From: Mike Miller Date: Thu, 25 Feb 2010 14:03:12 -0600 Subject: [SCSI] hpsa: remove scan thread The intent of the scan thread was to allow a UNIT ATTENTION/LUN DATA CHANGED condition encountered in the interrupt handler to trigger a rescan of devices, which can't be done in interrupt context. However, we weren't able to get this to work, due to multiple such UNIT ATTENTION conditions arriving during the rescan, during updating of the SCSI mid layer, etc. There's no way to tell the devices, "stand still while I scan you!" Since it doesn't work, there's no point in having the thread, as the rescan triggered via ioctl or sysfs can be done without such a thread. Signed-off-by: Mike Miller Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 167 ++-------------------------------------------------- drivers/scsi/hpsa.h | 3 - 2 files changed, 4 insertions(+), 166 deletions(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index a72a18eabe24..3d43bb245ac8 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -53,7 +53,7 @@ #include "hpsa.h" /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ -#define HPSA_DRIVER_VERSION "2.0.1-3" +#define HPSA_DRIVER_VERSION "2.0.2-1" #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" /* How long to wait (in milliseconds) for board to go into simple mode */ @@ -212,133 +212,6 @@ static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh) return (struct ctlr_info *) *priv; } -static struct task_struct *hpsa_scan_thread; -static DEFINE_MUTEX(hpsa_scan_mutex); -static LIST_HEAD(hpsa_scan_q); -static int hpsa_scan_func(void *data); - -/** - * add_to_scan_list() - add controller to rescan queue - * @h: Pointer to the controller. - * - * Adds the controller to the rescan queue if not already on the queue. - * - * returns 1 if added to the queue, 0 if skipped (could be on the - * queue already, or the controller could be initializing or shutting - * down). - **/ -static int add_to_scan_list(struct ctlr_info *h) -{ - struct ctlr_info *test_h; - int found = 0; - int ret = 0; - - if (h->busy_initializing) - return 0; - - /* - * If we don't get the lock, it means the driver is unloading - * and there's no point in scheduling a new scan. - */ - if (!mutex_trylock(&h->busy_shutting_down)) - return 0; - - mutex_lock(&hpsa_scan_mutex); - list_for_each_entry(test_h, &hpsa_scan_q, scan_list) { - if (test_h == h) { - found = 1; - break; - } - } - if (!found && !h->busy_scanning) { - INIT_COMPLETION(h->scan_wait); - list_add_tail(&h->scan_list, &hpsa_scan_q); - ret = 1; - } - mutex_unlock(&hpsa_scan_mutex); - mutex_unlock(&h->busy_shutting_down); - - return ret; -} - -/** - * remove_from_scan_list() - remove controller from rescan queue - * @h: Pointer to the controller. - * - * Removes the controller from the rescan queue if present. 
Blocks if - * the controller is currently conducting a rescan. The controller - * can be in one of three states: - * 1. Doesn't need a scan - * 2. On the scan list, but not scanning yet (we remove it) - * 3. Busy scanning (and not on the list). In this case we want to wait for - * the scan to complete to make sure the scanning thread for this - * controller is completely idle. - **/ -static void remove_from_scan_list(struct ctlr_info *h) -{ - struct ctlr_info *test_h, *tmp_h; - - mutex_lock(&hpsa_scan_mutex); - list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) { - if (test_h == h) { /* state 2. */ - list_del(&h->scan_list); - complete_all(&h->scan_wait); - mutex_unlock(&hpsa_scan_mutex); - return; - } - } - if (h->busy_scanning) { /* state 3. */ - mutex_unlock(&hpsa_scan_mutex); - wait_for_completion(&h->scan_wait); - } else { /* state 1, nothing to do. */ - mutex_unlock(&hpsa_scan_mutex); - } -} - -/* hpsa_scan_func() - kernel thread used to rescan controllers - * @data: Ignored. - * - * A kernel thread used scan for drive topology changes on - * controllers. The thread processes only one controller at a time - * using a queue. Controllers are added to the queue using - * add_to_scan_list() and removed from the queue either after done - * processing or using remove_from_scan_list(). - * - * returns 0. - **/ -static int hpsa_scan_func(__attribute__((unused)) void *data) -{ - struct ctlr_info *h; - int host_no; - - while (1) { - set_current_state(TASK_INTERRUPTIBLE); - schedule(); - if (kthread_should_stop()) - break; - - while (1) { - mutex_lock(&hpsa_scan_mutex); - if (list_empty(&hpsa_scan_q)) { - mutex_unlock(&hpsa_scan_mutex); - break; - } - h = list_entry(hpsa_scan_q.next, struct ctlr_info, - scan_list); - list_del(&h->scan_list); - h->busy_scanning = 1; - mutex_unlock(&hpsa_scan_mutex); - host_no = h->scsi_host ? h->scsi_host->host_no : -1; - hpsa_scan_start(h->scsi_host); - complete_all(&h->scan_wait); - mutex_lock(&hpsa_scan_mutex); - h->busy_scanning = 0; - mutex_unlock(&hpsa_scan_mutex); - } - } - return 0; -} - static int check_for_unit_attention(struct ctlr_info *h, struct CommandList *c) { @@ -356,21 +229,8 @@ static int check_for_unit_attention(struct ctlr_info *h, break; case REPORT_LUNS_CHANGED: dev_warn(&h->pdev->dev, "hpsa%d: report LUN data " - "changed\n", h->ctlr); + "changed, action required\n", h->ctlr); /* - * Here, we could call add_to_scan_list and wake up the scan thread, - * except that it's quite likely that we will get more than one - * REPORT_LUNS_CHANGED condition in quick succession, which means - * that those which occur after the first one will likely happen - * *during* the hpsa_scan_thread's rescan. And the rescan code is not - * robust enough to restart in the middle, undoing what it has already - * done, and it's not clear that it's even possible to do this, since - * part of what it does is notify the SCSI mid layer, which starts - * doing it's own i/o to read partition tables and so on, and the - * driver doesn't have visibility to know what might need undoing. - * In any event, if possible, it is horribly complicated to get right - * so we just don't do it for now. - * * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012. 
*/ break; @@ -397,10 +257,7 @@ static ssize_t host_store_rescan(struct device *dev, struct ctlr_info *h; struct Scsi_Host *shost = class_to_shost(dev); h = shost_to_hba(shost); - if (add_to_scan_list(h)) { - wake_up_process(hpsa_scan_thread); - wait_for_completion_interruptible(&h->scan_wait); - } + hpsa_scan_start(h->scsi_host); return count; } @@ -3553,8 +3410,6 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, h->busy_initializing = 1; INIT_HLIST_HEAD(&h->cmpQ); INIT_HLIST_HEAD(&h->reqQ); - mutex_init(&h->busy_shutting_down); - init_completion(&h->scan_wait); rc = hpsa_pci_init(h, pdev); if (rc != 0) goto clean1; @@ -3702,8 +3557,6 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev) return; } h = pci_get_drvdata(pdev); - mutex_lock(&h->busy_shutting_down); - remove_from_scan_list(h); hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ hpsa_shutdown(pdev); iounmap(h->vaddr); @@ -3724,7 +3577,6 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev) */ pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); - mutex_unlock(&h->busy_shutting_down); kfree(h); } @@ -3878,23 +3730,12 @@ clean_up: */ static int __init hpsa_init(void) { - int err; - /* Start the scan thread */ - hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan"); - if (IS_ERR(hpsa_scan_thread)) { - err = PTR_ERR(hpsa_scan_thread); - return -ENODEV; - } - err = pci_register_driver(&hpsa_pci_driver); - if (err) - kthread_stop(hpsa_scan_thread); - return err; + return pci_register_driver(&hpsa_pci_driver); } static void __exit hpsa_cleanup(void) { pci_unregister_driver(&hpsa_pci_driver); - kthread_stop(hpsa_scan_thread); } module_init(hpsa_init); diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index a0502b3ac17e..fc15215145d9 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -97,9 +97,6 @@ struct ctlr_info { int scan_finished; spinlock_t scan_lock; wait_queue_head_t scan_wait_queue; - struct mutex busy_shutting_down; - struct list_head scan_list; - struct completion scan_wait; struct Scsi_Host *scsi_host; spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */ -- cgit v1.2.3 From ff9fea94546afa2a496c15354533f06088347f6e Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 25 Feb 2010 14:03:17 -0600 Subject: [SCSI] hpsa: mark hpsa_pci_init as __devinit Signed-off-by: Stephen M. Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 3d43bb245ac8..2e1edce9b20e 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3174,7 +3174,7 @@ default_int_mode: h->intr[PERF_MODE_INT] = pdev->irq; } -static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) +static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) { ushort subsystem_vendor_id, subsystem_device_id, command; u32 board_id, scratchpad = 0; -- cgit v1.2.3 From db61bfcfe2a68dc71402c270686cd73b80971efc Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 25 Feb 2010 14:03:22 -0600 Subject: [SCSI] hpsa: Clarify calculation of padding for commandlist structure Signed-off-by: Stephen M. 
Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa_cmd.h | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index 3e0abdf76689..43b6f1cffe34 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -313,12 +313,18 @@ struct CommandList { void *scsi_cmd; /* on 64 bit architectures, to get this to be 32-byte-aligned - * it so happens we need no padding, on 32 bit systems, - * we need 8 bytes of padding. This does that. + * it so happens we need PAD_64 bytes of padding, on 32 bit systems, + * we need PAD_32 bytes of padding (see below). This does that. + * If it happens that 64 bit and 32 bit systems need different + * padding, PAD_32 and PAD_64 can be set independently, and. + * the code below will do the right thing. */ -#define COMMANDLIST_PAD ((8 - sizeof(long))/4 * 8) +#define IS_32_BIT ((8 - sizeof(long))/4) +#define IS_64_BIT (!IS_32_BIT) +#define PAD_32 (8) +#define PAD_64 (0) +#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64) u8 pad[COMMANDLIST_PAD]; - }; /* Configuration Table Structure */ -- cgit v1.2.3 From 33a2ffce51d9598380d73c515a27fc6cff3bd9c4 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 25 Feb 2010 14:03:27 -0600 Subject: [SCSI] hpsa: Increase the number of scatter gather elements supported. This uses the scatter-gather chaining feature of Smart Array controllers. 32 scatter-gather elements are embedded in the "command list", and the last element in the list may be marked as a "chain pointer", and point to an additional block of scatter gather elements. The precise number of scatter gather elements supported is dependent on the particular kind of Smart Array, and is determined at runtime by querying the hardware. Signed-off-by: Stephen M. 
Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa.c | 134 ++++++++++++++++++++++++++++++++++++++++++++---- drivers/scsi/hpsa.h | 4 ++ drivers/scsi/hpsa_cmd.h | 7 +-- 3 files changed, 131 insertions(+), 14 deletions(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 2e1edce9b20e..183d3a43c280 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -187,7 +187,6 @@ static struct scsi_host_template hpsa_driver_template = { .scan_finished = hpsa_scan_finished, .change_queue_depth = hpsa_change_queue_depth, .this_id = -1, - .sg_tablesize = MAXSGENTRIES, .use_clustering = ENABLE_CLUSTERING, .eh_device_reset_handler = hpsa_eh_device_reset_handler, .ioctl = hpsa_ioctl, @@ -844,6 +843,76 @@ static void hpsa_scsi_setup(struct ctlr_info *h) spin_lock_init(&h->devlock); } +static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) +{ + int i; + + if (!h->cmd_sg_list) + return; + for (i = 0; i < h->nr_cmds; i++) { + kfree(h->cmd_sg_list[i]); + h->cmd_sg_list[i] = NULL; + } + kfree(h->cmd_sg_list); + h->cmd_sg_list = NULL; +} + +static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h) +{ + int i; + + if (h->chainsize <= 0) + return 0; + + h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, + GFP_KERNEL); + if (!h->cmd_sg_list) + return -ENOMEM; + for (i = 0; i < h->nr_cmds; i++) { + h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * + h->chainsize, GFP_KERNEL); + if (!h->cmd_sg_list[i]) + goto clean; + } + return 0; + +clean: + hpsa_free_sg_chain_blocks(h); + return -ENOMEM; +} + +static void hpsa_map_sg_chain_block(struct ctlr_info *h, + struct CommandList *c) +{ + struct SGDescriptor *chain_sg, *chain_block; + u64 temp64; + + chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; + chain_block = h->cmd_sg_list[c->cmdindex]; + chain_sg->Ext = HPSA_SG_CHAIN; + chain_sg->Len = sizeof(*chain_sg) * + (c->Header.SGTotal - h->max_cmd_sg_entries); + temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len, + PCI_DMA_TODEVICE); + chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL); + chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL); +} + +static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, + struct CommandList *c) +{ + struct SGDescriptor *chain_sg; + union u64bit temp64; + + if (c->Header.SGTotal <= h->max_cmd_sg_entries) + return; + + chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; + temp64.val32.lower = chain_sg->Addr.lower; + temp64.val32.upper = chain_sg->Addr.upper; + pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); +} + static void complete_scsi_command(struct CommandList *cp, int timeout, u32 tag) { @@ -860,6 +929,8 @@ static void complete_scsi_command(struct CommandList *cp, h = cp->h; scsi_dma_unmap(cmd); /* undo the DMA mappings */ + if (cp->Header.SGTotal > h->max_cmd_sg_entries) + hpsa_unmap_sg_chain_block(h, cp); cmd->result = (DID_OK << 16); /* host byte */ cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ @@ -1064,6 +1135,7 @@ static int hpsa_scsi_detect(struct ctlr_info *h) sh->max_id = HPSA_MAX_LUN; sh->can_queue = h->nr_cmds; sh->cmd_per_lun = h->nr_cmds; + sh->sg_tablesize = h->maxsgentries; h->scsi_host = sh; sh->hostdata[0] = (unsigned long) h; sh->irq = h->intr[PERF_MODE_INT]; @@ -1765,16 +1837,17 @@ out: * dma mapping and fills in the scatter gather entries of the * hpsa command, cp. 
*/ -static int hpsa_scatter_gather(struct pci_dev *pdev, +static int hpsa_scatter_gather(struct ctlr_info *h, struct CommandList *cp, struct scsi_cmnd *cmd) { unsigned int len; struct scatterlist *sg; u64 addr64; - int use_sg, i; + int use_sg, i, sg_index, chained; + struct SGDescriptor *curr_sg; - BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES); + BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); use_sg = scsi_dma_map(cmd); if (use_sg < 0) @@ -1783,15 +1856,33 @@ static int hpsa_scatter_gather(struct pci_dev *pdev, if (!use_sg) goto sglist_finished; + curr_sg = cp->SG; + chained = 0; + sg_index = 0; scsi_for_each_sg(cmd, sg, use_sg, i) { + if (i == h->max_cmd_sg_entries - 1 && + use_sg > h->max_cmd_sg_entries) { + chained = 1; + curr_sg = h->cmd_sg_list[cp->cmdindex]; + sg_index = 0; + } addr64 = (u64) sg_dma_address(sg); len = sg_dma_len(sg); - cp->SG[i].Addr.lower = - (u32) (addr64 & (u64) 0x00000000FFFFFFFF); - cp->SG[i].Addr.upper = - (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); - cp->SG[i].Len = len; - cp->SG[i].Ext = 0; /* we are not chaining */ + curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); + curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); + curr_sg->Len = len; + curr_sg->Ext = 0; /* we are not chaining */ + curr_sg++; + } + + if (use_sg + chained > h->maxSG) + h->maxSG = use_sg + chained; + + if (chained) { + cp->Header.SGList = h->max_cmd_sg_entries; + cp->Header.SGTotal = (u16) (use_sg + 1); + hpsa_map_sg_chain_block(h, cp); + return 0; } sglist_finished: @@ -1887,7 +1978,7 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, break; } - if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */ + if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ cmd_free(h, c); return SCSI_MLQUEUE_HOST_BUSY; } @@ -3283,6 +3374,23 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) h->board_id = board_id; h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); + h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); + + /* + * Limit in-command s/g elements to 32 save dma'able memory. 
+ * Howvever spec says if 0, use 31 + */ + + h->max_cmd_sg_entries = 31; + if (h->maxsgentries > 512) { + h->max_cmd_sg_entries = 32; + h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1; + h->maxsgentries--; /* save one for chain pointer */ + } else { + h->maxsgentries = 31; /* default to traditional values */ + h->chainsize = 0; + } + h->product_name = products[prod_index].product_name; h->access = *(products[prod_index].access); /* Allow room for some ioctls */ @@ -3463,6 +3571,8 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, rc = -ENOMEM; goto clean4; } + if (hpsa_allocate_sg_chain_blocks(h)) + goto clean4; spin_lock_init(&h->lock); spin_lock_init(&h->scan_lock); init_waitqueue_head(&h->scan_wait_queue); @@ -3485,6 +3595,7 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, return 1; clean4: + hpsa_free_sg_chain_blocks(h); kfree(h->cmd_pool_bits); if (h->cmd_pool) pci_free_consistent(h->pdev, @@ -3560,6 +3671,7 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev) hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ hpsa_shutdown(pdev); iounmap(h->vaddr); + hpsa_free_sg_chain_blocks(h); pci_free_consistent(h->pdev, h->nr_cmds * sizeof(struct CommandList), h->cmd_pool, h->cmd_pool_dhandle); diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index fc15215145d9..1bb5233b09a0 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -83,6 +83,10 @@ struct ctlr_info { unsigned int maxQsinceinit; unsigned int maxSG; spinlock_t lock; + int maxsgentries; + u8 max_cmd_sg_entries; + int chainsize; + struct SGDescriptor **cmd_sg_list; /* pointers to command and error info pool */ struct CommandList *cmd_pool; diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index 43b6f1cffe34..cb0c2385f3f6 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -23,7 +23,8 @@ /* general boundary defintions */ #define SENSEINFOBYTES 32 /* may vary between hbas */ -#define MAXSGENTRIES 31 +#define MAXSGENTRIES 32 +#define HPSA_SG_CHAIN 0x80000000 #define MAXREPLYQS 256 /* Command Status value */ @@ -321,8 +322,8 @@ struct CommandList { */ #define IS_32_BIT ((8 - sizeof(long))/4) #define IS_64_BIT (!IS_32_BIT) -#define PAD_32 (8) -#define PAD_64 (0) +#define PAD_32 (24) +#define PAD_64 (16) #define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64) u8 pad[COMMANDLIST_PAD]; }; -- cgit v1.2.3 From 43aebfa12e7631124472237dc945c27af54ca646 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Thu, 25 Feb 2010 14:03:32 -0600 Subject: [SCSI] hpsa: remove unused members next, prev, and retry_count from command list structure. Signed-off-by: Stephen M. 
Cameron Signed-off-by: James Bottomley --- drivers/scsi/hpsa_cmd.h | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index cb0c2385f3f6..56fb9827681e 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -306,11 +306,8 @@ struct CommandList { int cmd_type; long cmdindex; struct hlist_node list; - struct CommandList *prev; - struct CommandList *next; struct request *rq; struct completion *waiting; - int retry_count; void *scsi_cmd; /* on 64 bit architectures, to get this to be 32-byte-aligned @@ -322,8 +319,8 @@ struct CommandList { */ #define IS_32_BIT ((8 - sizeof(long))/4) #define IS_64_BIT (!IS_32_BIT) -#define PAD_32 (24) -#define PAD_64 (16) +#define PAD_32 (4) +#define PAD_64 (4) #define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64) u8 pad[COMMANDLIST_PAD]; }; -- cgit v1.2.3 From 9f1177a3f8eee22427eb97e6e00b62ff0be2871f Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 26 Feb 2010 14:12:57 -0500 Subject: [SCSI] lpfc 8.3.10: Fix Initialization issues - Add NULL checks to the pointers for the config_async mailbox and dump_wakeup_params mailbox. - Add code to check return value of lpfc_read_sparams everywhere and handle failures appropriately. Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_hbadisc.c | 32 ++++++++++++++++++-------------- drivers/scsi/lpfc/lpfc_init.c | 19 +++++++++++++++++-- drivers/scsi/lpfc/lpfc_sli.c | 8 +++++++- drivers/scsi/lpfc/lpfc_vport.c | 7 ++++++- 4 files changed, 48 insertions(+), 18 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 2359d0bfb734..e58d8aeec09e 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -2024,8 +2024,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) int rc; struct fcf_record *fcf_record; - sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - spin_lock_irq(&phba->hbalock); switch (la->UlnkSpeed) { case LA_1GHZ_LINK: @@ -2117,18 +2115,24 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) spin_unlock_irq(&phba->hbalock); lpfc_linkup(phba); - if (sparam_mbox) { - lpfc_read_sparam(phba, sparam_mbox, 0); - sparam_mbox->vport = vport; - sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; - rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - mp = (struct lpfc_dmabuf *) sparam_mbox->context1; - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); - mempool_free(sparam_mbox, phba->mbox_mem_pool); - goto out; - } + sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!sparam_mbox) + goto out; + + rc = lpfc_read_sparam(phba, sparam_mbox, 0); + if (rc) { + mempool_free(sparam_mbox, phba->mbox_mem_pool); + goto out; + } + sparam_mbox->vport = vport; + sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; + rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mp = (struct lpfc_dmabuf *) sparam_mbox->context1; + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + mempool_free(sparam_mbox, phba->mbox_mem_pool); + goto out; } if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) { diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index d29ac7c317d9..b64cecafa7ab 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -350,7 +350,12 @@ lpfc_config_port_post(struct lpfc_hba *phba) mb = &pmb->u.mb; /* Get login parameters for NID. 
*/ - lpfc_read_sparam(phba, pmb, 0); + rc = lpfc_read_sparam(phba, pmb, 0); + if (rc) { + mempool_free(pmb, phba->mbox_mem_pool); + return -ENOMEM; + } + pmb->vport = vport; if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -359,7 +364,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) mb->mbxCommand, mb->mbxStatus); phba->link_state = LPFC_HBA_ERROR; mp = (struct lpfc_dmabuf *) pmb->context1; - mempool_free( pmb, phba->mbox_mem_pool); + mempool_free(pmb, phba->mbox_mem_pool); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); return -EIO; @@ -571,6 +576,11 @@ lpfc_config_port_post(struct lpfc_hba *phba) } /* MBOX buffer will be freed in mbox compl */ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + phba->link_state = LPFC_HBA_ERROR; + return -ENOMEM; + } + lpfc_config_async(phba, pmb, LPFC_ELS_RING); pmb->mbox_cmpl = lpfc_config_async_cmpl; pmb->vport = phba->pport; @@ -588,6 +598,11 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Get Option rom version */ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + phba->link_state = LPFC_HBA_ERROR; + return -ENOMEM; + } + lpfc_dump_wakeup_param(phba, pmb); pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; pmb->vport = phba->pport; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 35e3b96d4e07..d51ee7e3273b 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4388,7 +4388,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); /* Read the port's service parameters. */ - lpfc_read_sparam(phba, mboxq, vport->vpi); + rc = lpfc_read_sparam(phba, mboxq, vport->vpi); + if (rc) { + phba->link_state = LPFC_HBA_ERROR; + rc = -ENOMEM; + goto out_free_vpd; + } + mboxq->vport = vport; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); mp = (struct lpfc_dmabuf *) mboxq->context1; diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index dc86e873102a..869f76cbc58a 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -123,7 +123,12 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport) } mb = &pmb->u.mb; - lpfc_read_sparam(phba, pmb, vport->vpi); + rc = lpfc_read_sparam(phba, pmb, vport->vpi); + if (rc) { + mempool_free(pmb, phba->mbox_mem_pool); + return -ENOMEM; + } + /* * Grab buffer pointer and clear context1 so we can use * lpfc_sli_issue_box_wait -- cgit v1.2.3 From e40a02c12581f710877da372b5d7e15b68a1c5c3 Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 26 Feb 2010 14:13:54 -0500 Subject: [SCSI] lpfc 8.3.10: Fix user interface issues - Add Logging message for critial errors. - Remove unused variable from lpfc_nodev_tmo_show - Update supress_link_up parameter with #define values. 
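The suppress_link_up change replaces the parameter's bare 0/1/2 settings with named states so the call sites document themselves. A condensed illustration of the pattern the patch below introduces (the macro names and the cfg_suppress_link_up field are the ones added to lpfc.h and used in lpfc_init.c; the call site is abridged):

        #define LPFC_INITIALIZE_LINK              0 /* do normal init_link mbox */
        #define LPFC_DELAY_INIT_LINK              1 /* layered driver hold off */
        #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */

        /* Call sites now test the named state instead of a magic number: */
        if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK)
                lpfc_init_link(phba, pmb, phba->cfg_topology,
                               phba->cfg_link_speed);
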
Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 3 +++ drivers/scsi/lpfc/lpfc_attr.c | 7 ++++--- drivers/scsi/lpfc/lpfc_els.c | 21 ++++++++++++++++++--- drivers/scsi/lpfc/lpfc_init.c | 4 ++-- drivers/scsi/lpfc/lpfc_scsi.c | 14 ++++++++++++-- drivers/scsi/lpfc/lpfc_sli.c | 11 ++++++++++- 6 files changed, 49 insertions(+), 11 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 84b696463a58..ce0599dcc814 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -623,6 +623,9 @@ struct lpfc_hba { uint32_t cfg_log_verbose; uint32_t cfg_aer_support; uint32_t cfg_suppress_link_up; +#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ +#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ +#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ lpfc_vpd_t vpd; /* vital product data */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c992e8328f9e..64cd17eedb64 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1939,7 +1939,9 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, # 0x2 = never bring up link # Default value is 0. */ -LPFC_ATTR_R(suppress_link_up, 0, 0, 2, "Suppress Link Up at initialization"); +LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK, + LPFC_DELAY_INIT_LINK_INDEFINITELY, + "Suppress Link Up at initialization"); /* # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear @@ -1966,8 +1968,7 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr, { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - int val = 0; - val = vport->cfg_devloss_tmo; + return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); } diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 08b6634cb994..4623323da577 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -806,9 +806,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } /* FLOGI failure */ - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0100 FLOGI failure Data: x%x x%x " - "x%x\n", + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n", irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout); goto flogifail; @@ -1409,6 +1408,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } /* PLOGI failed */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", + ndlp->nlp_DID, irsp->ulpStatus, + irsp->un.ulpWord[4]); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (lpfc_error_lost_link(irsp)) rc = NLP_STE_FREED_NODE; @@ -1577,6 +1580,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } /* PRLI failed */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2754 PRLI failure DID:%06X Status:x%x/x%x\n", + ndlp->nlp_DID, irsp->ulpStatus, + irsp->un.ulpWord[4]); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (lpfc_error_lost_link(irsp)) goto out; @@ -1860,6 +1867,10 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } /* ADISC failed */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2755 ADISC failure DID:%06X Status:x%x/x%x\n", + ndlp->nlp_DID, irsp->ulpStatus, + irsp->un.ulpWord[4]); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (!lpfc_error_lost_link(irsp)) 
lpfc_disc_state_machine(vport, ndlp, cmdiocb, @@ -2009,6 +2020,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ELS command is being retried */ goto out; /* LOGO failed */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2756 LOGO failure DID:%06X Status:x%x/x%x\n", + ndlp->nlp_DID, irsp->ulpStatus, + irsp->un.ulpWord[4]); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (lpfc_error_lost_link(irsp)) goto out; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index b64cecafa7ab..437ddc92ebea 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -549,7 +549,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } - } else if (phba->cfg_suppress_link_up == 0) { + } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -667,7 +667,7 @@ lpfc_hba_init_link(struct lpfc_hba *phba) mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } - phba->cfg_suppress_link_up = 0; + phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; return 0; } diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 7f21b47db791..889a7b9ec92b 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -2079,8 +2079,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, if (resp_info & RSP_LEN_VALID) { rsplen = be32_to_cpu(fcprsp->rspRspLen); - if ((rsplen != 0 && rsplen != 4 && rsplen != 8) || - (fcprsp->rspInfo3 != RSP_NO_FAILURE)) { + if (rsplen != 0 && rsplen != 4 && rsplen != 8) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "2719 Invalid response length: " "tgt x%x lun x%x cmnd x%x rsplen x%x\n", @@ -2090,6 +2089,17 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, host_status = DID_ERROR; goto out; } + if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "2757 Protocol failure detected during " + "processing of FCP I/O op: " + "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n", + cmnd->device->id, + cmnd->device->lun, cmnd->cmnd[0], + fcprsp->rspInfo3); + host_status = DID_ERROR; + goto out; + } } if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index d51ee7e3273b..49bed3e8c95d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -3091,6 +3091,12 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) /* Check to see if any errors occurred during init */ if ((status & HS_FFERM) || (i >= 20)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2751 Adapter failed to restart, " + "status reg x%x, FW Data: A8 x%x AC x%x\n", + status, + readl(phba->MBslimaddr + 0xa8), + readl(phba->MBslimaddr + 0xac)); phba->link_state = LPFC_HBA_ERROR; retval = 1; } @@ -3278,6 +3284,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) if (retval != MBX_SUCCESS) { if (retval != MBX_BUSY) mempool_free(pmb, phba->mbox_mem_pool); + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2752 KILL_BOARD command failed retval %d\n", + retval); spin_lock_irq(&phba->hbalock); phba->link_flag &= ~LS_IGNORE_ERATT; spin_unlock_irq(&phba->hbalock); @@ -4035,7 +4044,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) lpfc_sli_hba_setup_error: phba->link_state = LPFC_HBA_ERROR; - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0445 Firmware 
initialization failed\n"); return rc; } -- cgit v1.2.3 From 0f65ff680f90281d49ee864965f06774eba9657d Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 26 Feb 2010 14:14:23 -0500 Subject: [SCSI] lpfc 8.3.10: Update SLI interface areas - Clear LPFC_DRIVER_ABORTED on FCP command completion. - Clear exchange busy flag when I/O is aborted and found on aborted list. - Free sglq when XRI_ABORTED event is processed before release of IOCB. - Only process iocb as aborted when LPFC_DRIVER_ABORTED is set. Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 1 - drivers/scsi/lpfc/lpfc_crtn.h | 2 +- drivers/scsi/lpfc/lpfc_els.c | 23 +++++--- drivers/scsi/lpfc/lpfc_init.c | 7 +++ drivers/scsi/lpfc/lpfc_scsi.c | 35 +++++++++--- drivers/scsi/lpfc/lpfc_sli.c | 128 +++++++++++++++++++++++++++--------------- drivers/scsi/lpfc/lpfc_sli.h | 1 + drivers/scsi/lpfc/lpfc_sli4.h | 7 +++ 8 files changed, 141 insertions(+), 63 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index ce0599dcc814..4d45e6939783 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -509,7 +509,6 @@ struct lpfc_hba { int (*lpfc_hba_down_link) (struct lpfc_hba *); - /* SLI4 specific HBA data structure */ struct lpfc_sli4_hba sli4_hba; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 6f0fb51eb461..e7f548281b94 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -385,7 +385,7 @@ void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); void lpfc_start_fdiscs(struct lpfc_hba *phba); struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t); - +struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t); #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) #define HBA_EVENT_RSCN 5 #define HBA_EVENT_LINK_UP 2 diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 4623323da577..6a2135a0d03a 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -6234,7 +6234,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; - vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + if (phba->sli_rev == LPFC_SLI_REV4) + vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; spin_unlock_irq(shost->host_lock); } @@ -6812,21 +6813,27 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; unsigned long iflag = 0; - spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag); + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&phba->sli4_hba.abts_sgl_list_lock); list_for_each_entry_safe(sglq_entry, sglq_next, &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { if (sglq_entry->sli4_xritag == xri) { list_del(&sglq_entry->list); - spin_unlock_irqrestore( - &phba->sli4_hba.abts_sgl_list_lock, - iflag); - spin_lock_irqsave(&phba->hbalock, iflag); - list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); + sglq_entry->state = SGL_FREED; + spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } } - spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag); + spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + sglq_entry = __lpfc_get_active_sglq(phba, xri); + if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { + spin_unlock_irqrestore(&phba->hbalock, 
iflag); + return; + } + sglq_entry->state = SGL_XRI_ABORTED; + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; } diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 437ddc92ebea..b7889c53fe23 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -822,6 +822,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) LIST_HEAD(aborts); int ret; unsigned long iflag = 0; + struct lpfc_sglq *sglq_entry = NULL; + ret = lpfc_hba_down_post_s3(phba); if (ret) return ret; @@ -837,6 +839,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) * list. */ spin_lock(&phba->sli4_hba.abts_sgl_list_lock); + list_for_each_entry(sglq_entry, + &phba->sli4_hba.lpfc_abts_els_sgl_list, list) + sglq_entry->state = SGL_FREED; + list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); @@ -4412,6 +4418,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba) /* The list order is used by later block SGL registraton */ spin_lock_irq(&phba->hbalock); + sglq_entry->state = SGL_FREED; list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; phba->sli4_hba.total_sglq_bufs++; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 889a7b9ec92b..a4881f26ab1b 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -620,23 +620,40 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); struct lpfc_scsi_buf *psb, *next_psb; unsigned long iflag = 0; + struct lpfc_iocbq *iocbq; + int i; - spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); list_for_each_entry_safe(psb, next_psb, &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { if (psb->cur_iocbq.sli4_xritag == xri) { list_del(&psb->list); psb->exch_busy = 0; psb->status = IOSTAT_SUCCESS; - spin_unlock_irqrestore( - &phba->sli4_hba.abts_scsi_buf_list_lock, - iflag); + spin_unlock( + &phba->sli4_hba.abts_scsi_buf_list_lock); + spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_release_scsi_buf_s4(phba, psb); return; } } - spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, - iflag); + spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); + for (i = 1; i <= phba->sli.last_iotag; i++) { + iocbq = phba->sli.iocbq_lookup[i]; + + if (!(iocbq->iocb_flag & LPFC_IO_FCP) || + (iocbq->iocb_flag & LPFC_IO_LIBDFC)) + continue; + if (iocbq->sli4_xritag != xri) + continue; + psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); + psb->exch_busy = 0; + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; + + } + spin_unlock_irqrestore(&phba->hbalock, iflag); } /** @@ -1006,6 +1023,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) struct scatterlist *sgel = NULL; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; + struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; dma_addr_t physaddr; @@ -1056,6 +1074,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) physaddr = sg_dma_address(sgel); if (phba->sli_rev == 3 && !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && + !(iocbq->iocb_flag & DSS_SECURITY_OP) && nseg <= LPFC_EXT_DATA_BDE_COUNT) { data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 
data_bde->tus.f.bdeSize = sg_dma_len(sgel); @@ -1082,7 +1101,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) * explicitly reinitialized since all iocb memory resources are reused. */ if (phba->sli_rev == 3 && - !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { + !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && + !(iocbq->iocb_flag & DSS_SECURITY_OP)) { if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { /* * The extended IOCB format can only fit 3 BDE or a BPL. @@ -1107,6 +1127,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) } else { iocb_cmd->un.fcpi64.bdl.bdeSize = ((num_bde + 2) * sizeof(struct ulp_bde64)); + iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); } fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 49bed3e8c95d..9feeaff47a52 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -494,7 +494,7 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) * * Returns sglq ponter = success, NULL = Failure. **/ -static struct lpfc_sglq * +struct lpfc_sglq * __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) { uint16_t adj_xri; @@ -526,6 +526,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba) return NULL; adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; + sglq->state = SGL_ALLOCATED; return sglq; } @@ -580,15 +581,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) else sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); if (sglq) { - if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) { + if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && + (sglq->state != SGL_XRI_ABORTED)) { spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag); list_add(&sglq->list, &phba->sli4_hba.lpfc_abts_els_sgl_list); spin_unlock_irqrestore( &phba->sli4_hba.abts_sgl_list_lock, iflag); - } else + } else { + sglq->state = SGL_FREED; list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); + } } @@ -2258,41 +2262,56 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, spin_unlock_irqrestore(&phba->hbalock, iflag); } - if ((phba->sli_rev == LPFC_SLI_REV4) && - (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) { - /* Set cmdiocb flag for the exchange - * busy so sgl (xri) will not be - * released until the abort xri is - * received from hba, clear the - * LPFC_DRIVER_ABORTED bit in case - * it was driver initiated abort. - */ - spin_lock_irqsave(&phba->hbalock, - iflag); - cmdiocbp->iocb_flag &= - ~LPFC_DRIVER_ABORTED; - cmdiocbp->iocb_flag |= - LPFC_EXCHANGE_BUSY; - spin_unlock_irqrestore(&phba->hbalock, - iflag); - cmdiocbp->iocb.ulpStatus = - IOSTAT_LOCAL_REJECT; - cmdiocbp->iocb.un.ulpWord[4] = - IOERR_ABORT_REQUESTED; - /* - * For SLI4, irsiocb contains NO_XRI - * in sli_xritag, it shall not affect - * releasing sgl (xri) process. - */ - saveq->iocb.ulpStatus = - IOSTAT_LOCAL_REJECT; - saveq->iocb.un.ulpWord[4] = - IOERR_SLI_ABORTED; - spin_lock_irqsave(&phba->hbalock, - iflag); - saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; - spin_unlock_irqrestore(&phba->hbalock, - iflag); + if (phba->sli_rev == LPFC_SLI_REV4) { + if (saveq->iocb_flag & + LPFC_EXCHANGE_BUSY) { + /* Set cmdiocb flag for the + * exchange busy so sgl (xri) + * will not be released until + * the abort xri is received + * from hba. 
+ */ + spin_lock_irqsave( + &phba->hbalock, iflag); + cmdiocbp->iocb_flag |= + LPFC_EXCHANGE_BUSY; + spin_unlock_irqrestore( + &phba->hbalock, iflag); + } + if (cmdiocbp->iocb_flag & + LPFC_DRIVER_ABORTED) { + /* + * Clear LPFC_DRIVER_ABORTED + * bit in case it was driver + * initiated abort. + */ + spin_lock_irqsave( + &phba->hbalock, iflag); + cmdiocbp->iocb_flag &= + ~LPFC_DRIVER_ABORTED; + spin_unlock_irqrestore( + &phba->hbalock, iflag); + cmdiocbp->iocb.ulpStatus = + IOSTAT_LOCAL_REJECT; + cmdiocbp->iocb.un.ulpWord[4] = + IOERR_ABORT_REQUESTED; + /* + * For SLI4, irsiocb contains + * NO_XRI in sli_xritag, it + * shall not affect releasing + * sgl (xri) process. + */ + saveq->iocb.ulpStatus = + IOSTAT_LOCAL_REJECT; + saveq->iocb.un.ulpWord[4] = + IOERR_SLI_ABORTED; + spin_lock_irqsave( + &phba->hbalock, iflag); + saveq->iocb_flag |= + LPFC_DELAY_MEM_FREE; + spin_unlock_irqrestore( + &phba->hbalock, iflag); + } } } (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); @@ -2515,14 +2534,16 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, &rspiocbq); - if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { - spin_unlock_irqrestore(&phba->hbalock, - iflag); - (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, - &rspiocbq); - spin_lock_irqsave(&phba->hbalock, - iflag); - } + if (unlikely(!cmdiocbq)) + break; + if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) + cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; + if (cmdiocbq->iocb_cmpl) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, + &rspiocbq); + spin_lock_irqsave(&phba->hbalock, iflag); + } break; case LPFC_UNSOL_IOCB: spin_unlock_irqrestore(&phba->hbalock, iflag); @@ -7451,6 +7472,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, { wait_queue_head_t *pdone_q; unsigned long iflags; + struct lpfc_scsi_buf *lpfc_cmd; spin_lock_irqsave(&phba->hbalock, iflags); cmdiocbq->iocb_flag |= LPFC_IO_WAKE; @@ -7458,6 +7480,14 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, &rspiocbq->iocb, sizeof(IOCB_t)); + /* Set the exchange busy flag for task management commands */ + if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && + !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { + lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, + cur_iocbq); + lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; + } + pdone_q = cmdiocbq->context_un.wait_queue; if (pdone_q) wake_up(pdone_q); @@ -9076,6 +9106,12 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, /* Fake the irspiocb and copy necessary response information */ lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); + if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { + spin_lock_irqsave(&phba->hbalock, iflags); + cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; + spin_unlock_irqrestore(&phba->hbalock, iflags); + } + /* Pass the cmd_iocb and the rsp state to the upper layer */ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); } diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index dfcf5437d1f5..b4a639c47616 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -62,6 +62,7 @@ struct lpfc_iocbq { #define LPFC_DELAY_MEM_FREE 0x20 /* Defer free'ing of FC data */ #define LPFC_EXCHANGE_BUSY 0x40 /* SLI4 hba reported XB in response */ #define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */ +#define DSS_SECURITY_OP 0x100 /* security IO */ #define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ #define 
LPFC_FIP_ELS_ID_SHIFT 14 diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 86308836600f..04fd7829cf39 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -431,11 +431,18 @@ enum lpfc_sge_type { SCSI_BUFF_TYPE }; +enum lpfc_sgl_state { + SGL_FREED, + SGL_ALLOCATED, + SGL_XRI_ABORTED +}; + struct lpfc_sglq { /* lpfc_sglqs are used in double linked lists */ struct list_head list; struct list_head clist; enum lpfc_sge_type buff_type; /* is this a scsi sgl */ + enum lpfc_sgl_state state; uint16_t iotag; /* pre-assigned IO tag */ uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ struct sli4_sge *sgl; /* pre-assigned SGL */ -- cgit v1.2.3 From e2aed29f29d0d289df3b0b627b122832d4dc80fe Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 26 Feb 2010 14:15:00 -0500 Subject: [SCSI] lpfc 8.3.10: Added management for LP21000 through BSG. Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc.h | 6 + drivers/scsi/lpfc/lpfc_bsg.c | 332 ++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/lpfc/lpfc_bsg.h | 12 ++ drivers/scsi/lpfc/lpfc_init.c | 8 + 4 files changed, 358 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 4d45e6939783..565e16dd74fc 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -37,6 +37,9 @@ struct lpfc_sli2_slim; the NameServer before giving up. */ #define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ #define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ +#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi + cmnd for menlo needs nearly twice as for firmware + downloads using bsg */ #define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ #define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/ @@ -806,6 +809,9 @@ struct lpfc_hba { struct list_head ct_ev_waiters; struct unsol_rcv_ct_ctx ct_ctx[64]; uint32_t ctx_idx; + + uint8_t menlo_flag; /* menlo generic flags */ +#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */ }; static inline struct Scsi_Host * diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index f3f1bf1a0a71..692c29f6048e 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -83,15 +83,28 @@ struct lpfc_bsg_mbox { struct fc_bsg_job *set_job; }; +#define MENLO_DID 0x0000FC0E + +struct lpfc_bsg_menlo { + struct lpfc_iocbq *cmdiocbq; + struct lpfc_iocbq *rspiocbq; + struct lpfc_dmabuf *bmp; + + /* job waiting for this iocb to finish */ + struct fc_bsg_job *set_job; +}; + #define TYPE_EVT 1 #define TYPE_IOCB 2 #define TYPE_MBOX 3 +#define TYPE_MENLO 4 struct bsg_job_data { uint32_t type; union { struct lpfc_bsg_event *evt; struct lpfc_bsg_iocb iocb; struct lpfc_bsg_mbox mbox; + struct lpfc_bsg_menlo menlo; } context_un; }; @@ -2456,6 +2469,18 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, case MBX_PORT_IOV_CONTROL: break; case MBX_SET_VARIABLE: + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1226 mbox: set_variable 0x%x, 0x%x\n", + mb->un.varWords[0], + mb->un.varWords[1]); + if ((mb->un.varWords[0] == SETVAR_MLOMNT) + && (mb->un.varWords[1] == 1)) { + phba->wait_4_mlo_maint_flg = 1; + } else if (mb->un.varWords[0] == SETVAR_MLORST) { + phba->link_flag &= ~LS_LOOPBACK_MODE; + phba->fc_topology = TOPOLOGY_PT_PT; + } + break; case MBX_RUN_BIU_DIAG64: case MBX_READ_EVENT_LOG: case MBX_READ_SPARM64: 
@@ -2637,6 +2662,297 @@ job_error: return rc; } +/** + * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler + * @phba: Pointer to HBA context object. + * @cmdiocbq: Pointer to command iocb. + * @rspiocbq: Pointer to response iocb. + * + * This function is the completion handler for iocbs issued using + * lpfc_menlo_cmd function. This function is called by the + * ring event handler function without any lock held. This function + * can be called from both worker thread context and interrupt + * context. This function also can be called from another thread which + * cleans up the SLI layer objects. + * This function copies the contents of the response iocb to the + * response iocb memory object provided by the caller of + * lpfc_sli_issue_iocb_wait and then wakes up the thread which + * sleeps for the iocb completion. + **/ +static void +lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocbq, + struct lpfc_iocbq *rspiocbq) +{ + struct bsg_job_data *dd_data; + struct fc_bsg_job *job; + IOCB_t *rsp; + struct lpfc_dmabuf *bmp; + struct lpfc_bsg_menlo *menlo; + unsigned long flags; + struct menlo_response *menlo_resp; + int rc = 0; + + spin_lock_irqsave(&phba->ct_ev_lock, flags); + dd_data = cmdiocbq->context1; + if (!dd_data) { + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + return; + } + + menlo = &dd_data->context_un.menlo; + job = menlo->set_job; + job->dd_data = NULL; /* so timeout handler does not reply */ + + spin_lock_irqsave(&phba->hbalock, flags); + cmdiocbq->iocb_flag |= LPFC_IO_WAKE; + if (cmdiocbq->context2 && rspiocbq) + memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, + &rspiocbq->iocb, sizeof(IOCB_t)); + spin_unlock_irqrestore(&phba->hbalock, flags); + + bmp = menlo->bmp; + rspiocbq = menlo->rspiocbq; + rsp = &rspiocbq->iocb; + + pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + + /* always return the xri, this would be used in the case + * of a menlo download to allow the data to be sent as a continuation + * of the exchange. + */ + menlo_resp = (struct menlo_response *) + job->reply->reply_data.vendor_reply.vendor_rsp; + menlo_resp->xri = rsp->ulpContext; + if (rsp->ulpStatus) { + if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { + switch (rsp->un.ulpWord[4] & 0xff) { + case IOERR_SEQUENCE_TIMEOUT: + rc = -ETIMEDOUT; + break; + case IOERR_INVALID_RPI: + rc = -EFAULT; + break; + default: + rc = -EACCES; + break; + } + } else + rc = -EACCES; + } else + job->reply->reply_payload_rcv_len = + rsp->un.genreq64.bdl.bdeSize; + + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + lpfc_sli_release_iocbq(phba, rspiocbq); + lpfc_sli_release_iocbq(phba, cmdiocbq); + kfree(bmp); + kfree(dd_data); + /* make error code available to userspace */ + job->reply->result = rc; + /* complete the job back to userspace */ + job->job_done(job); + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + return; +} + +/** + * lpfc_menlo_cmd - send an ioctl for menlo hardware + * @job: fc_bsg_job to handle + * + * This function issues a gen request 64 CR ioctl for all menlo cmd requests, + * all the command completions will return the xri for the command. + * For menlo data requests a gen request 64 CX is used to continue the exchange + * supplied in the menlo request header xri field. 
+ **/ +static int +lpfc_menlo_cmd(struct fc_bsg_job *job) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocbq, *rspiocbq; + IOCB_t *cmd, *rsp; + int rc = 0; + struct menlo_command *menlo_cmd; + struct menlo_response *menlo_resp; + struct lpfc_dmabuf *bmp = NULL; + int request_nseg; + int reply_nseg; + struct scatterlist *sgel = NULL; + int numbde; + dma_addr_t busaddr; + struct bsg_job_data *dd_data; + struct ulp_bde64 *bpl = NULL; + + /* in case no data is returned return just the return code */ + job->reply->reply_payload_rcv_len = 0; + + if (job->request_len < + sizeof(struct fc_bsg_request) + + sizeof(struct menlo_command)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2784 Received MENLO_CMD request below " + "minimum size\n"); + rc = -ERANGE; + goto no_dd_data; + } + + if (job->reply_len < + sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2785 Received MENLO_CMD reply below " + "minimum size\n"); + rc = -ERANGE; + goto no_dd_data; + } + + if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2786 Adapter does not support menlo " + "commands\n"); + rc = -EPERM; + goto no_dd_data; + } + + menlo_cmd = (struct menlo_command *) + job->request->rqst_data.h_vendor.vendor_cmd; + + menlo_resp = (struct menlo_response *) + job->reply->reply_data.vendor_reply.vendor_rsp; + + /* allocate our bsg tracking structure */ + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2787 Failed allocation of dd_data\n"); + rc = -ENOMEM; + goto no_dd_data; + } + + bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!bmp) { + rc = -ENOMEM; + goto free_dd; + } + + cmdiocbq = lpfc_sli_get_iocbq(phba); + if (!cmdiocbq) { + rc = -ENOMEM; + goto free_bmp; + } + + rspiocbq = lpfc_sli_get_iocbq(phba); + if (!rspiocbq) { + rc = -ENOMEM; + goto free_cmdiocbq; + } + + rsp = &rspiocbq->iocb; + + bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); + if (!bmp->virt) { + rc = -ENOMEM; + goto free_rspiocbq; + } + + INIT_LIST_HEAD(&bmp->list); + bpl = (struct ulp_bde64 *) bmp->virt; + request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { + busaddr = sg_dma_address(sgel); + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bpl->tus.f.bdeSize = sg_dma_len(sgel); + bpl->tus.w = cpu_to_le32(bpl->tus.w); + bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); + bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); + bpl++; + } + + reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { + busaddr = sg_dma_address(sgel); + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + bpl->tus.f.bdeSize = sg_dma_len(sgel); + bpl->tus.w = cpu_to_le32(bpl->tus.w); + bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); + bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); + bpl++; + } + + cmd = &cmdiocbq->iocb; + cmd->un.genreq64.bdl.ulpIoTag32 = 0; + cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); + cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); + cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; + cmd->un.genreq64.bdl.bdeSize = + (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); + 
cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); + cmd->un.genreq64.w5.hcsw.Dfctl = 0; + cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD; + cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */ + cmd->ulpBdeCount = 1; + cmd->ulpClass = CLASS3; + cmd->ulpOwner = OWN_CHIP; + cmd->ulpLe = 1; /* Limited Edition */ + cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; + cmdiocbq->vport = phba->pport; + /* We want the firmware to timeout before we do */ + cmd->ulpTimeout = MENLO_TIMEOUT - 5; + cmdiocbq->context3 = bmp; + cmdiocbq->context2 = rspiocbq; + cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp; + cmdiocbq->context1 = dd_data; + cmdiocbq->context2 = rspiocbq; + if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) { + cmd->ulpCommand = CMD_GEN_REQUEST64_CR; + cmd->ulpPU = MENLO_PU; /* 3 */ + cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */ + cmd->ulpContext = MENLO_CONTEXT; /* 0 */ + } else { + cmd->ulpCommand = CMD_GEN_REQUEST64_CX; + cmd->ulpPU = 1; + cmd->un.ulpWord[4] = 0; + cmd->ulpContext = menlo_cmd->xri; + } + + dd_data->type = TYPE_MENLO; + dd_data->context_un.menlo.cmdiocbq = cmdiocbq; + dd_data->context_un.menlo.rspiocbq = rspiocbq; + dd_data->context_un.menlo.set_job = job; + dd_data->context_un.menlo.bmp = bmp; + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, + MENLO_TIMEOUT - 5); + if (rc == IOCB_SUCCESS) + return 0; /* done for now */ + + /* iocb failed so cleanup */ + pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + +free_rspiocbq: + lpfc_sli_release_iocbq(phba, rspiocbq); +free_cmdiocbq: + lpfc_sli_release_iocbq(phba, cmdiocbq); +free_bmp: + kfree(bmp); +free_dd: + kfree(dd_data); +no_dd_data: + /* make error code available to userspace */ + job->reply->result = rc; + job->dd_data = NULL; + return rc; +} /** * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job * @job: fc_bsg_job to handle @@ -2669,6 +2985,10 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job) case LPFC_BSG_VENDOR_MBOX: rc = lpfc_bsg_mbox_cmd(job); break; + case LPFC_BSG_VENDOR_MENLO_CMD: + case LPFC_BSG_VENDOR_MENLO_DATA: + rc = lpfc_menlo_cmd(job); + break; default: rc = -EINVAL; job->reply->reply_payload_rcv_len = 0; @@ -2728,6 +3048,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) struct lpfc_bsg_event *evt; struct lpfc_bsg_iocb *iocb; struct lpfc_bsg_mbox *mbox; + struct lpfc_bsg_menlo *menlo; struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; struct bsg_job_data *dd_data; unsigned long flags; @@ -2775,6 +3096,17 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) spin_unlock_irqrestore(&phba->ct_ev_lock, flags); job->job_done(job); break; + case TYPE_MENLO: + menlo = &dd_data->context_un.menlo; + cmdiocb = menlo->cmdiocbq; + /* hint to completion handler that the job timed out */ + job->reply->result = -EAGAIN; + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + /* this will call our completion handler */ + spin_lock_irq(&phba->hbalock); + lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); + spin_unlock_irq(&phba->hbalock); + break; default: spin_unlock_irqrestore(&phba->ct_ev_lock, flags); break; diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h index 6c8f87e39b98..5bc630819b9e 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.h +++ b/drivers/scsi/lpfc/lpfc_bsg.h @@ -31,6 +31,8 @@ #define LPFC_BSG_VENDOR_DIAG_TEST 5 #define LPFC_BSG_VENDOR_GET_MGMT_REV 6 #define 
LPFC_BSG_VENDOR_MBOX 7 +#define LPFC_BSG_VENDOR_MENLO_CMD 8 +#define LPFC_BSG_VENDOR_MENLO_DATA 9 struct set_ct_event { uint32_t command; @@ -96,3 +98,13 @@ struct dfc_mbox_req { uint8_t mbOffset; }; +/* Used for menlo command or menlo data. The xri is only used for menlo data */ +struct menlo_command { + uint32_t cmd; + uint32_t xri; +}; + +struct menlo_response { + uint32_t xri; /* return the xri of the iocb exchange */ +}; + diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index b7889c53fe23..88e02a453e0e 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2597,6 +2597,14 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) init_timer(&vport->els_tmofunc); vport->els_tmofunc.function = lpfc_els_timeout; vport->els_tmofunc.data = (unsigned long)vport; + if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { + phba->menlo_flag |= HBA_MENLO_SUPPORT; + /* check for menlo minimum sg count */ + if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) { + phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; + shost->sg_tablesize = phba->cfg_sg_seg_cnt; + } + } error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); if (error) -- cgit v1.2.3 From fc2b989be9190f3311a5ae41289828e24897a20e Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 26 Feb 2010 14:15:29 -0500 Subject: [SCSI] lpfc 8.3.10: Fix Discovery issues - Prevent Vport discovery after reg_new_vport completes when physical logged in using FDISC. - Remove fast FCF failover fabric name matching. Allow failover to FCFs connected to different fabrics. - Added fast FCF failover in response to FCF DEAD event on current FCF record. Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_crtn.h | 1 + drivers/scsi/lpfc/lpfc_els.c | 7 +- drivers/scsi/lpfc/lpfc_hbadisc.c | 27 ++++---- drivers/scsi/lpfc/lpfc_init.c | 140 +++++++++++++++++++++++++++++++-------- drivers/scsi/lpfc/lpfc_sli.c | 54 +++++++++++++-- drivers/scsi/lpfc/lpfc_sli4.h | 8 ++- 6 files changed, 186 insertions(+), 51 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index e7f548281b94..39739a707ed4 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -221,6 +221,7 @@ void lpfc_unregister_fcf_rescan(struct lpfc_hba *); void lpfc_unregister_unused_fcf(struct lpfc_hba *); int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *); void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *); +void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *); int lpfc_mem_alloc(struct lpfc_hba *, int align); void lpfc_mem_free(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 6a2135a0d03a..a81d43306d17 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -6004,7 +6004,12 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) lpfc_issue_fabric_reglogin(vport); else { - lpfc_start_fdiscs(phba); + /* + * If the physical port is instantiated using + * FDISC, do not start vport discovery. 
+ */ + if (vport->port_state != LPFC_FDISC) + lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); } } else diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index e58d8aeec09e..f28ce40dc349 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1504,7 +1504,9 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) */ spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~FCF_DISC_INPROGRESS; - phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; + phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | + FCF_DEAD_FOVER | + FCF_CVL_FOVER); spin_unlock_irq(&phba->hbalock); } @@ -1649,7 +1651,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) /* If in fast failover, mark it's completed */ - phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; + phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | + FCF_DEAD_FOVER | + FCF_CVL_FOVER); spin_unlock_irqrestore(&phba->hbalock, iflags); goto out; } @@ -1669,14 +1673,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) * Update on failover FCF record only if it's in FCF fast-failover * period; otherwise, update on current FCF record. */ - if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { - /* Fast FCF failover only to the same fabric name */ - if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name, - new_fcf_record)) - fcf_rec = &phba->fcf.failover_rec; - else - goto read_next_fcf; - } else + if (phba->fcf.fcf_flag & FCF_REDISC_FOV) + fcf_rec = &phba->fcf.failover_rec; + else fcf_rec = &phba->fcf.current_rec; if (phba->fcf.fcf_flag & FCF_AVAILABLE) { @@ -1705,8 +1704,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) * If the new hba FCF record has lower priority value * than the driver FCF record, use the new record. 
*/ - if (lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record) && - (new_fcf_record->fip_priority < fcf_rec->priority)) { + if (new_fcf_record->fip_priority < fcf_rec->priority) { /* Choose this FCF record */ __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, addr_mode, vlan_id, 0); @@ -1762,7 +1760,9 @@ read_next_fcf: sizeof(struct lpfc_fcf_rec)); /* mark the FCF fast failover completed */ spin_lock_irqsave(&phba->hbalock, iflags); - phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; + phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | + FCF_DEAD_FOVER | + FCF_CVL_FOVER); spin_unlock_irqrestore(&phba->hbalock, iflags); /* Register to the new FCF record */ lpfc_register_fcf(phba); @@ -4760,6 +4760,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) return; /* Reset HBA FCF states after successful unregister FCF */ phba->fcf.fcf_flag = 0; + phba->fcf.current_rec.flag = 0; /* * If driver is not unloading, check if there is any other diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 88e02a453e0e..ff45e336917a 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2199,8 +2199,10 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport) void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) { - /* Clear pending FCF rediscovery wait timer */ - phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; + /* Clear pending FCF rediscovery wait and failover in progress flags */ + phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND | + FCF_DEAD_FOVER | + FCF_CVL_FOVER); /* Now, try to stop the timer */ del_timer(&phba->fcf.redisc_wait); } @@ -3211,6 +3213,68 @@ out_free_pmb: mempool_free(pmb, phba->mbox_mem_pool); } +/** + * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport + * @vport: pointer to vport data structure. + * + * This routine is to perform Clear Virtual Link (CVL) on a vport in + * response to a CVL event. + * + * Return the pointer to the ndlp with the vport if successful, otherwise + * return NULL. + **/ +static struct lpfc_nodelist * +lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost; + struct lpfc_hba *phba; + + if (!vport) + return NULL; + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) + return NULL; + phba = vport->phba; + if (!phba) + return NULL; + if (phba->pport->port_state <= LPFC_FLOGI) + return NULL; + /* If virtual link is not yet instantiated ignore CVL */ + if (vport->port_state <= LPFC_FDISC) + return NULL; + shost = lpfc_shost_from_vport(vport); + if (!shost) + return NULL; + lpfc_linkdown_port(vport); + lpfc_cleanup_pending_mbox(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_CVL_RCVD; + spin_unlock_irq(shost->host_lock); + + return ndlp; +} + +/** + * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports + * @vport: pointer to lpfc hba data structure. + * + * This routine is to perform Clear Virtual Link (CVL) on all vports in + * response to a FCF dead event. + **/ +static void +lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + int i; + + vports = lpfc_create_vport_work_array(phba); + if (vports) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) + lpfc_sli4_perform_vport_cvl(vports[i]); + lpfc_destroy_vport_work_array(phba, vports); +} + /** * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event * @phba: pointer to lpfc hba data structure. 
@@ -3227,7 +3291,6 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, struct lpfc_vport *vport; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; - uint32_t link_state; int active_vlink_present; struct lpfc_vport **vports; int i; @@ -3284,16 +3347,35 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, /* If the event is not for currently used fcf do nothing */ if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) break; - /* - * Currently, driver support only one FCF - so treat this as - * a link down, but save the link state because we don't want - * it to be changed to Link Down unless it is already down. + /* We request port to rediscover the entire FCF table for + * a fast recovery from case that the current FCF record + * is no longer valid if the last CVL event hasn't already + * triggered process. */ - link_state = phba->link_state; - lpfc_linkdown(phba); - phba->link_state = link_state; - /* Unregister FCF if no devices connected to it */ - lpfc_unregister_unused_fcf(phba); + spin_lock_irq(&phba->hbalock); + if (phba->fcf.fcf_flag & FCF_CVL_FOVER) { + spin_unlock_irq(&phba->hbalock); + break; + } + /* Mark the fast failover process in progress */ + phba->fcf.fcf_flag |= FCF_DEAD_FOVER; + spin_unlock_irq(&phba->hbalock); + rc = lpfc_sli4_redisc_fcf_table(phba); + if (rc) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DEAD_FOVER; + spin_unlock_irq(&phba->hbalock); + /* + * Last resort will fail over by treating this + * as a link down to FCF registration. + */ + lpfc_sli4_fcf_dead_failthrough(phba); + } else + /* Handling fast FCF failover to a DEAD FCF event + * is considered equalivant to receiving CVL to all + * vports. + */ + lpfc_sli4_perform_all_vport_cvl(phba); break; case LPFC_FCOE_EVENT_TYPE_CVL: lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, @@ -3301,23 +3383,9 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); vport = lpfc_find_vport_by_vpid(phba, acqe_fcoe->index - phba->vpi_base); - if (!vport) - break; - ndlp = lpfc_findnode_did(vport, Fabric_DID); + ndlp = lpfc_sli4_perform_vport_cvl(vport); if (!ndlp) break; - shost = lpfc_shost_from_vport(vport); - if (phba->pport->port_state <= LPFC_FLOGI) - break; - /* If virtual link is not yet instantiated ignore CVL */ - if (vport->port_state <= LPFC_FDISC) - break; - - lpfc_linkdown_port(vport); - lpfc_cleanup_pending_mbox(vport); - spin_lock_irq(shost->host_lock); - vport->fc_flag |= FC_VPORT_CVL_RCVD; - spin_unlock_irq(shost->host_lock); active_vlink_present = 0; vports = lpfc_create_vport_work_array(phba); @@ -3340,6 +3408,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, * re-instantiate the Vlink using FDISC. */ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -3350,15 +3419,28 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, * Otherwise, we request port to rediscover * the entire FCF table for a fast recovery * from possible case that the current FCF - * is no longer valid. + * is no longer valid if the FCF_DEAD event + * hasn't already triggered process. 
*/ + spin_lock_irq(&phba->hbalock); + if (phba->fcf.fcf_flag & FCF_DEAD_FOVER) { + spin_unlock_irq(&phba->hbalock); + break; + } + /* Mark the fast failover process in progress */ + phba->fcf.fcf_flag |= FCF_CVL_FOVER; + spin_unlock_irq(&phba->hbalock); rc = lpfc_sli4_redisc_fcf_table(phba); - if (rc) + if (rc) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_CVL_FOVER; + spin_unlock_irq(&phba->hbalock); /* * Last resort will be re-try on the * the current registered FCF entry. */ lpfc_retry_pport_discovery(phba); + } } break; default: diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 9feeaff47a52..bb6a4426d469 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4519,6 +4519,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Post receive buffers to the device */ lpfc_sli4_rb_setup(phba); + /* Reset HBA FCF states after HBA reset */ + phba->fcf.fcf_flag = 0; + phba->fcf.current_rec.flag = 0; + /* Start the ELS watchdog timer */ mod_timer(&vport->els_tmofunc, jiffies + HZ * (phba->fc_ratov * 2)); @@ -12069,11 +12073,26 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) "2746 Requesting for FCF rediscovery failed " "status x%x add_status x%x\n", shdr_status, shdr_add_status); - /* - * Request failed, last resort to re-try current - * registered FCF entry - */ - lpfc_retry_pport_discovery(phba); + if (phba->fcf.fcf_flag & FCF_CVL_FOVER) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_CVL_FOVER; + spin_unlock_irq(&phba->hbalock); + /* + * CVL event triggered FCF rediscover request failed, + * last resort to re-try current registered FCF entry. + */ + lpfc_retry_pport_discovery(phba); + } else { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DEAD_FOVER; + spin_unlock_irq(&phba->hbalock); + /* + * DEAD FCF event triggered FCF rediscover request + * failed, last resort to fail over as a link down + * to FCF registration. + */ + lpfc_sli4_fcf_dead_failthrough(phba); + } } else /* * Start FCF rediscovery wait timer for pending FCF @@ -12128,6 +12147,31 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) return 0; } +/** + * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event + * @phba: pointer to lpfc hba data structure. + * + * This function is the failover routine as a last resort to the FCF DEAD + * event when driver failed to perform fast FCF failover. + **/ +void +lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) +{ + uint32_t link_state; + + /* + * Last resort as FCF DEAD event failover will treat this as + * a link down, but save the link state because we don't want + * it to be changed to Link Down unless it is already down. + */ + link_state = phba->link_state; + lpfc_linkdown(phba); + phba->link_state = link_state; + + /* Unregister FCF if no devices connected to it */ + lpfc_unregister_unused_fcf(phba); +} + /** * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. * @phba: pointer to lpfc hba data structure. 
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 04fd7829cf39..2169cd24d90c 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -153,9 +153,11 @@ struct lpfc_fcf { #define FCF_REGISTERED 0x02 /* FCF registered with FW */ #define FCF_SCAN_DONE 0x04 /* FCF table scan done */ #define FCF_IN_USE 0x08 /* Atleast one discovery completed */ -#define FCF_REDISC_PEND 0x10 /* FCF rediscovery pending */ -#define FCF_REDISC_EVT 0x20 /* FCF rediscovery event to worker thread */ -#define FCF_REDISC_FOV 0x40 /* Post FCF rediscovery fast failover */ +#define FCF_DEAD_FOVER 0x10 /* FCF DEAD triggered fast FCF failover */ +#define FCF_CVL_FOVER 0x20 /* CVL triggered fast FCF failover */ +#define FCF_REDISC_PEND 0x40 /* FCF rediscovery pending */ +#define FCF_REDISC_EVT 0x80 /* FCF rediscovery event to worker thread */ +#define FCF_REDISC_FOV 0x100 /* Post FCF rediscovery fast failover */ uint32_t addr_mode; struct lpfc_fcf_rec current_rec; struct lpfc_fcf_rec failover_rec; -- cgit v1.2.3 From 0c9ab6f5cb28199ef5de84874d135ed44f64d92b Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 26 Feb 2010 14:15:57 -0500 Subject: [SCSI] lpfc 8.3.10: Added round robin FCF failover - Added round robin FCF failover on initial or FCF rediscovery FLOGI failure. Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_crtn.h | 4 + drivers/scsi/lpfc/lpfc_els.c | 91 +++++++- drivers/scsi/lpfc/lpfc_hbadisc.c | 486 +++++++++++++++++++++++++++++++-------- drivers/scsi/lpfc/lpfc_init.c | 123 +++++++--- drivers/scsi/lpfc/lpfc_logmsg.h | 1 + drivers/scsi/lpfc/lpfc_mbox.c | 8 +- drivers/scsi/lpfc/lpfc_sli.c | 218 ++++++++++++++++-- drivers/scsi/lpfc/lpfc_sli4.h | 33 ++- 8 files changed, 802 insertions(+), 162 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 39739a707ed4..5087c4211b43 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -63,6 +63,7 @@ void lpfc_linkdown_port(struct lpfc_vport *); void lpfc_port_link_failure(struct lpfc_vport *); void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); void lpfc_retry_pport_discovery(struct lpfc_hba *); void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); @@ -222,6 +223,9 @@ void lpfc_unregister_unused_fcf(struct lpfc_hba *); int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *); void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *); void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *); +uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); +int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); +void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); int lpfc_mem_alloc(struct lpfc_hba *, int align); void lpfc_mem_free(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index a81d43306d17..d807f36ba4f9 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -771,6 +771,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp = cmdiocb->context1; struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; struct serv_parm *sp; + uint16_t fcf_index; int rc; /* Check to see if link went down during discovery */ @@ -788,6 +789,54 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, vport->port_state); if (irsp->ulpStatus) { + /* + * In case 
of FIP mode, perform round robin FCF failover + * due to new FCF discovery + */ + if ((phba->hba_flag & HBA_FIP_SUPPORT) && + (phba->fcf.fcf_flag & FCF_DISCOVERY)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, + "2611 FLOGI failed on registered " + "FCF record fcf_index:%d, trying " + "to perform round robin failover\n", + phba->fcf.current_rec.fcf_indx); + fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); + if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { + /* + * Exhausted the eligible FCF record list, + * fail through to retry FLOGI on current + * FCF record. + */ + lpfc_printf_log(phba, KERN_WARNING, + LOG_FIP | LOG_ELS, + "2760 FLOGI exhausted FCF " + "round robin failover list, " + "retry FLOGI on the current " + "registered FCF index:%d\n", + phba->fcf.current_rec.fcf_indx); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + spin_unlock_irq(&phba->hbalock); + } else { + rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, + fcf_index); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, + LOG_FIP | LOG_ELS, + "2761 FLOGI round " + "robin FCF failover " + "read FCF failed " + "rc:x%x, fcf_index:" + "%d\n", rc, + phba->fcf.current_rec.fcf_indx); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + spin_unlock_irq(&phba->hbalock); + } else + goto out; + } + } + /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) goto out; @@ -841,8 +890,18 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, else rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); - if (!rc) + if (!rc) { + /* Mark the FCF discovery process done */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS, + "2769 FLOGI successful on FCF record: " + "current_fcf_index:x%x, terminate FCF " + "round robin failover process\n", + phba->fcf.current_rec.fcf_indx); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + spin_unlock_irq(&phba->hbalock); goto out; + } } flogifail: @@ -6075,21 +6134,18 @@ mbox_err_exit: } /** - * lpfc_retry_pport_discovery - Start timer to retry FLOGI. + * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer * @phba: pointer to lpfc hba data structure. * - * This routine abort all pending discovery commands and - * start a timer to retry FLOGI for the physical port - * discovery. + * This routine cancels the retry delay timers to all the vports. **/ void -lpfc_retry_pport_discovery(struct lpfc_hba *phba) +lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) { struct lpfc_vport **vports; struct lpfc_nodelist *ndlp; - struct Scsi_Host *shost; - int i; uint32_t link_state; + int i; /* Treat this failure as linkdown for all vports */ link_state = phba->link_state; @@ -6107,13 +6163,30 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba) } lpfc_destroy_vport_work_array(phba, vports); } +} + +/** + * lpfc_retry_pport_discovery - Start timer to retry FLOGI. + * @phba: pointer to lpfc hba data structure. + * + * This routine abort all pending discovery commands and + * start a timer to retry FLOGI for the physical port + * discovery. 
+ **/ +void +lpfc_retry_pport_discovery(struct lpfc_hba *phba) +{ + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost; + + /* Cancel the all vports retry delay retry timers */ + lpfc_cancel_all_vport_retry_delay_timer(phba); /* If fabric require FLOGI, then re-instantiate physical login */ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); if (!ndlp) return; - shost = lpfc_shost_from_vport(phba->pport); mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); spin_lock_irq(shost->host_lock); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index f28ce40dc349..c555e3b7f202 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1481,8 +1481,6 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, int lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) { - LPFC_MBOXQ_t *mbox; - int rc; /* * If the Link is up and no FCoE events while in the * FCF discovery, no need to restart FCF discovery. @@ -1491,88 +1489,70 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) return 0; + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2768 Pending link or FCF event during current " + "handling of the previous event: link_state:x%x, " + "evt_tag_at_scan:x%x, evt_tag_current:x%x\n", + phba->link_state, phba->fcoe_eventtag_at_fcf_scan, + phba->fcoe_eventtag); + spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_AVAILABLE; spin_unlock_irq(&phba->hbalock); - if (phba->link_state >= LPFC_LINK_UP) - lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); - else { + if (phba->link_state >= LPFC_LINK_UP) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2780 Restart FCF table scan due to " + "pending FCF event:evt_tag_at_scan:x%x, " + "evt_tag_current:x%x\n", + phba->fcoe_eventtag_at_fcf_scan, + phba->fcoe_eventtag); + lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); + } else { /* * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS * flag */ spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~FCF_DISC_INPROGRESS; - phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | - FCF_DEAD_FOVER | - FCF_CVL_FOVER); + phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); spin_unlock_irq(&phba->hbalock); } + /* Unregister the currently registered FCF if required */ if (unreg_fcf) { spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_REGISTERED; spin_unlock_irq(&phba->hbalock); - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) { - lpfc_printf_log(phba, KERN_ERR, - LOG_DISCOVERY|LOG_MBOX, - "2610 UNREG_FCFI mbox allocation failed\n"); - return 1; - } - lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); - mbox->vport = phba->pport; - mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, - "2611 UNREG_FCFI issue mbox failed\n"); - mempool_free(mbox, phba->mbox_mem_pool); - } + lpfc_sli4_unregister_fcf(phba); } - return 1; } /** - * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox. + * lpfc_sli4_fcf_rec_mbox_parse - parse non-embedded fcf record mailbox command * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox object. + * @next_fcf_index: pointer to holder of next fcf index. * - * This function iterate through all the fcf records available in - * HBA and choose the optimal FCF record for discovery. 
After finding - * the FCF for discovery it register the FCF record and kick start - * discovery. - * If FCF_IN_USE flag is set in currently used FCF, the routine try to - * use a FCF record which match fabric name and mac address of the - * currently used FCF record. - * If the driver support only one FCF, it will try to use the FCF record - * used by BOOT_BIOS. + * This routine parses the non-embedded fcf mailbox command by performing the + * necessarily error checking, non-embedded read FCF record mailbox command + * SGE parsing, and endianness swapping. + * + * Returns the pointer to the new FCF record in the non-embedded mailbox + * command DMA memory if successfully, other NULL. */ -void -lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +static struct fcf_record * +lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, + uint16_t *next_fcf_index) { void *virt_addr; dma_addr_t phys_addr; - uint8_t *bytep; struct lpfc_mbx_sge sge; struct lpfc_mbx_read_fcf_tbl *read_fcf; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; struct fcf_record *new_fcf_record; - uint32_t boot_flag, addr_mode; - uint32_t next_fcf_index; - struct lpfc_fcf_rec *fcf_rec = NULL; - unsigned long iflags; - uint16_t vlan_id; - int rc; - - /* If there is pending FCoE event restart FCF table scan */ - if (lpfc_check_pending_fcoe_event(phba, 0)) { - lpfc_sli4_mbox_cmd_free(phba, mboxq); - return; - } /* Get the first SGE entry from the non-embedded DMA memory. This * routine only uses a single SGE. @@ -1583,59 +1563,183 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "2524 Failed to get the non-embedded SGE " "virtual address\n"); - goto out; + return NULL; } virt_addr = mboxq->sge_array->addr[0]; shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, - &shdr->response); - /* - * The FCF Record was read and there is no reason for the driver - * to maintain the FCF record data or memory. Instead, just need - * to book keeping the FCFIs can be used. 
- */ + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status) { - if (shdr_status == STATUS_FCF_TABLE_EMPTY) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + if (shdr_status == STATUS_FCF_TABLE_EMPTY) + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2726 READ_FCF_RECORD Indicates empty " "FCF table.\n"); - } else { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + else + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2521 READ_FCF_RECORD mailbox failed " - "with status x%x add_status x%x, mbx\n", - shdr_status, shdr_add_status); - } - goto out; + "with status x%x add_status x%x, " + "mbx\n", shdr_status, shdr_add_status); + return NULL; } - /* Interpreting the returned information of FCF records */ + + /* Interpreting the returned information of the FCF record */ read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, sizeof(struct lpfc_mbx_read_fcf_tbl)); - next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); - + *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); new_fcf_record = (struct fcf_record *)(virt_addr + sizeof(struct lpfc_mbx_read_fcf_tbl)); lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, sizeof(struct fcf_record)); - bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); + return new_fcf_record; +} + +/** + * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record + * @phba: pointer to lpfc hba data structure. + * @fcf_record: pointer to the fcf record. + * @vlan_id: the lowest vlan identifier associated to this fcf record. + * @next_fcf_index: the index to the next fcf record in hba's fcf table. + * + * This routine logs the detailed FCF record if the LOG_FIP loggin is + * enabled. + **/ +static void +lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, + struct fcf_record *fcf_record, + uint16_t vlan_id, + uint16_t next_fcf_index) +{ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2764 READ_FCF_RECORD:\n" + "\tFCF_Index : x%x\n" + "\tFCF_Avail : x%x\n" + "\tFCF_Valid : x%x\n" + "\tFIP_Priority : x%x\n" + "\tMAC_Provider : x%x\n" + "\tLowest VLANID : x%x\n" + "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n" + "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" + "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" + "\tNext_FCF_Index: x%x\n", + bf_get(lpfc_fcf_record_fcf_index, fcf_record), + bf_get(lpfc_fcf_record_fcf_avail, fcf_record), + bf_get(lpfc_fcf_record_fcf_valid, fcf_record), + fcf_record->fip_priority, + bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), + vlan_id, + bf_get(lpfc_fcf_record_mac_0, fcf_record), + bf_get(lpfc_fcf_record_mac_1, fcf_record), + bf_get(lpfc_fcf_record_mac_2, fcf_record), + bf_get(lpfc_fcf_record_mac_3, fcf_record), + bf_get(lpfc_fcf_record_mac_4, fcf_record), + bf_get(lpfc_fcf_record_mac_5, fcf_record), + bf_get(lpfc_fcf_record_fab_name_0, fcf_record), + bf_get(lpfc_fcf_record_fab_name_1, fcf_record), + bf_get(lpfc_fcf_record_fab_name_2, fcf_record), + bf_get(lpfc_fcf_record_fab_name_3, fcf_record), + bf_get(lpfc_fcf_record_fab_name_4, fcf_record), + bf_get(lpfc_fcf_record_fab_name_5, fcf_record), + bf_get(lpfc_fcf_record_fab_name_6, fcf_record), + bf_get(lpfc_fcf_record_fab_name_7, fcf_record), + bf_get(lpfc_fcf_record_switch_name_0, fcf_record), + bf_get(lpfc_fcf_record_switch_name_1, fcf_record), + bf_get(lpfc_fcf_record_switch_name_2, fcf_record), + bf_get(lpfc_fcf_record_switch_name_3, fcf_record), + bf_get(lpfc_fcf_record_switch_name_4, fcf_record), + bf_get(lpfc_fcf_record_switch_name_5, fcf_record), + 
bf_get(lpfc_fcf_record_switch_name_6, fcf_record), + bf_get(lpfc_fcf_record_switch_name_7, fcf_record), + next_fcf_index); +} + +/** + * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox object. + * + * This function iterates through all the fcf records available in + * HBA and chooses the optimal FCF record for discovery. After finding + * the FCF for discovery it registers the FCF record and kicks start + * discovery. + * If FCF_IN_USE flag is set in currently used FCF, the routine tries to + * use an FCF record which matches fabric name and mac address of the + * currently used FCF record. + * If the driver supports only one FCF, it will try to use the FCF record + * used by BOOT_BIOS. + */ +void +lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct fcf_record *new_fcf_record; + uint32_t boot_flag, addr_mode; + uint16_t fcf_index, next_fcf_index; + struct lpfc_fcf_rec *fcf_rec = NULL; + uint16_t vlan_id; + int rc; + + /* If there is pending FCoE event restart FCF table scan */ + if (lpfc_check_pending_fcoe_event(phba, 0)) { + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return; + } + + /* Parse the FCF record from the non-embedded mailbox command */ + new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, + &next_fcf_index); + if (!new_fcf_record) { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2765 Mailbox command READ_FCF_RECORD " + "failed to retrieve a FCF record.\n"); + /* Let next new FCF event trigger fast failover */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~FCF_DISC_INPROGRESS; + spin_unlock_irq(&phba->hbalock); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return; + } + + /* Check the FCF record against the connection list */ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, &addr_mode, &vlan_id); + + /* Log the FCF record information if turned on */ + lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, + next_fcf_index); + /* * If the fcf record does not match with connect list entries - * read the next entry. + * read the next entry; otherwise, this is an eligible FCF + * record for round robin FCF failover. */ - if (!rc) + if (!rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2781 FCF record fcf_index:x%x failed FCF " + "connection list check, fcf_avail:x%x, " + "fcf_valid:x%x\n", + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record), + bf_get(lpfc_fcf_record_fcf_avail, + new_fcf_record), + bf_get(lpfc_fcf_record_fcf_valid, + new_fcf_record)); goto read_next_fcf; + } else { + fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); + rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); + if (rc) + goto read_next_fcf; + } + /* * If this is not the first FCF discovery of the HBA, use last * FCF record for the discovery. The condition that a rescan * matches the in-use FCF record: fabric name, switch name, mac * address, and vlan_id. 
*/ - spin_lock_irqsave(&phba->hbalock, iflags); + spin_lock_irq(&phba->hbalock); if (phba->fcf.fcf_flag & FCF_IN_USE) { if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name, new_fcf_record) && @@ -1652,9 +1756,8 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) /* If in fast failover, mark it's completed */ phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | - FCF_DEAD_FOVER | - FCF_CVL_FOVER); - spin_unlock_irqrestore(&phba->hbalock, iflags); + FCF_DISCOVERY); + spin_unlock_irq(&phba->hbalock); goto out; } /* @@ -1665,7 +1768,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) * next candidate. */ if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } } @@ -1688,7 +1791,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) /* Choose this FCF record */ __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, addr_mode, vlan_id, BOOT_ENABLE); - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } /* @@ -1697,7 +1800,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) * the next FCF record. */ if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) { - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } /* @@ -1709,7 +1812,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, addr_mode, vlan_id, 0); } - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } /* @@ -1722,7 +1825,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) BOOT_ENABLE : 0)); phba->fcf.fcf_flag |= FCF_AVAILABLE; } - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); goto read_next_fcf; read_next_fcf: @@ -1738,9 +1841,22 @@ read_next_fcf: * FCF scan inprogress, and do nothing */ if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { - spin_lock_irqsave(&phba->hbalock, iflags); + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2782 No suitable FCF record " + "found during this round of " + "post FCF rediscovery scan: " + "fcf_evt_tag:x%x, fcf_index: " + "x%x\n", + phba->fcoe_eventtag_at_fcf_scan, + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record)); + /* + * Let next new FCF event trigger fast + * failover + */ + spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~FCF_DISC_INPROGRESS; - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); return; } /* @@ -1752,18 +1868,23 @@ read_next_fcf: * record. */ - /* unregister the current in-use FCF record */ + /* Unregister the current in-use FCF record */ lpfc_unregister_fcf(phba); - /* replace in-use record with the new record */ + + /* Replace in-use record with the new record */ memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, sizeof(struct lpfc_fcf_rec)); /* mark the FCF fast failover completed */ - spin_lock_irqsave(&phba->hbalock, iflags); - phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | - FCF_DEAD_FOVER | - FCF_CVL_FOVER); - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; + spin_unlock_irq(&phba->hbalock); + /* + * Set up the initial registered FCF index for FLOGI + * round robin FCF failover. 
+ */ + phba->fcf.fcf_rr_init_indx = + phba->fcf.failover_rec.fcf_indx; /* Register to the new FCF record */ lpfc_register_fcf(phba); } else { @@ -1776,13 +1897,25 @@ read_next_fcf: return; /* * Otherwise, initial scan or post linkdown rescan, - * register with the best fit FCF record found so - * far through the scanning process. + * register with the best FCF record found so far + * through the FCF scanning process. + */ + + /* mark the initial FCF discovery completed */ + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_INIT_DISC; + spin_unlock_irq(&phba->hbalock); + /* + * Set up the initial registered FCF index for FLOGI + * round robin FCF failover */ + phba->fcf.fcf_rr_init_indx = + phba->fcf.current_rec.fcf_indx; + /* Register to the new FCF record */ lpfc_register_fcf(phba); } } else - lpfc_sli4_read_fcf_record(phba, next_fcf_index); + lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index); return; out: @@ -1792,6 +1925,141 @@ out: return; } +/** + * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox object. + * + * This is the callback function for FLOGI failure round robin FCF failover + * read FCF record mailbox command from the eligible FCF record bmask for + * performing the failover. If the FCF read back is not valid/available, it + * fails through to retrying FLOGI to the currently registered FCF again. + * Otherwise, if the FCF read back is valid and available, it will set the + * newly read FCF record to the failover FCF record, unregister currently + * registered FCF record, copy the failover FCF record to the current + * FCF record, and then register the current FCF record before proceeding + * to trying FLOGI on the new failover FCF. + */ +void +lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct fcf_record *new_fcf_record; + uint32_t boot_flag, addr_mode; + uint16_t next_fcf_index; + uint16_t current_fcf_index; + uint16_t vlan_id; + + /* If link state is not up, stop the round robin failover process */ + if (phba->link_state < LPFC_LINK_UP) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + spin_unlock_irq(&phba->hbalock); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return; + } + + /* Parse the FCF record from the non-embedded mailbox command */ + new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, + &next_fcf_index); + if (!new_fcf_record) { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2766 Mailbox command READ_FCF_RECORD " + "failed to retrieve a FCF record.\n"); + goto out; + } + + /* Get the needed parameters from FCF record */ + lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, + &addr_mode, &vlan_id); + + /* Log the FCF record information if turned on */ + lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, + next_fcf_index); + + /* Upload new FCF record to the failover FCF record */ + spin_lock_irq(&phba->hbalock); + __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, + new_fcf_record, addr_mode, vlan_id, + (boot_flag ? 
BOOT_ENABLE : 0)); + spin_unlock_irq(&phba->hbalock); + + current_fcf_index = phba->fcf.current_rec.fcf_indx; + + /* Unregister the current in-use FCF record */ + lpfc_unregister_fcf(phba); + + /* Replace in-use record with the new record */ + memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, + sizeof(struct lpfc_fcf_rec)); + + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2783 FLOGI round robin FCF failover from FCF " + "(index:x%x) to FCF (index:x%x).\n", + current_fcf_index, + bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); + +out: + lpfc_sli4_mbox_cmd_free(phba, mboxq); + lpfc_register_fcf(phba); +} + +/** + * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler. + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox object. + * + * This is the callback function of read FCF record mailbox command for + * updating the eligible FCF bmask for FLOGI failure round robin FCF + * failover when a new FCF event happened. If the FCF read back is + * valid/available and it passes the connection list check, it updates + * the bmask for the eligible FCF record for round robin failover. + */ +void +lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct fcf_record *new_fcf_record; + uint32_t boot_flag, addr_mode; + uint16_t fcf_index, next_fcf_index; + uint16_t vlan_id; + int rc; + + /* If link state is not up, no need to proceed */ + if (phba->link_state < LPFC_LINK_UP) + goto out; + + /* If FCF discovery period is over, no need to proceed */ + if (phba->fcf.fcf_flag & FCF_DISCOVERY) + goto out; + + /* Parse the FCF record from the non-embedded mailbox command */ + new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, + &next_fcf_index); + if (!new_fcf_record) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2767 Mailbox command READ_FCF_RECORD " + "failed to retrieve a FCF record.\n"); + goto out; + } + + /* Check the connection list for eligibility */ + rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, + &addr_mode, &vlan_id); + + /* Log the FCF record information if turned on */ + lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, + next_fcf_index); + + if (!rc) + goto out; + + /* Update the eligible FCF record index bmask */ + fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); + rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); + +out: + lpfc_sli4_mbox_cmd_free(phba, mboxq); +} + /** * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. * @phba: pointer to lpfc hba data structure. 
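
For readers tracing the new round robin FCF failover flow, the following condensed
sketch shows how the pieces introduced above fit together when a FLOGI fails. It is
an illustration only, not driver code from this patch: locking (hbalock), logging,
and error paths are elided, and the real logic lives in the lpfc_cmpl_els_flogi and
lpfc_mbx_cmpl_fcf_rr_read_fcf_rec hunks shown earlier.

    /*
     * Illustrative summary of the FLOGI-failure round robin FCF failover
     * path (sketch; spinlocks and log messages omitted).
     */
    static void fcf_rr_failover_sketch(struct lpfc_hba *phba)
    {
            uint16_t fcf_index;

            /* Pick the next eligible FCF index from the round robin bmask */
            fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
            if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
                    /*
                     * Eligible list exhausted: end the FCF discovery period
                     * and retry FLOGI on the currently registered FCF.
                     */
                    phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
                    return;
            }

            /*
             * Read the selected FCF record asynchronously.  Its completion
             * handler (lpfc_mbx_cmpl_fcf_rr_read_fcf_rec) unregisters the
             * in-use FCF, copies the failover record into
             * phba->fcf.current_rec, re-registers the FCF, and FLOGI is
             * then retried against the new FCF.
             */
            if (lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index))
                    phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
    }
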
@@ -2190,10 +2458,20 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) spin_unlock_irq(&phba->hbalock); return; } + /* This is the initial FCF discovery scan */ + phba->fcf.fcf_flag |= FCF_INIT_DISC; spin_unlock_irq(&phba->hbalock); - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); - if (rc) + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2778 Start FCF table scan at linkup\n"); + + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, + LPFC_FCOE_FCF_GET_FIRST); + if (rc) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_INIT_DISC; + spin_unlock_irq(&phba->hbalock); goto out; + } } return; @@ -3383,8 +3661,12 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba) shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { - if (ndlp->nlp_flag & NLP_RPI_VALID) + if (ndlp->nlp_flag & NLP_RPI_VALID) { + /* The mempool_alloc might sleep */ + spin_unlock_irq(shost->host_lock); lpfc_unreg_rpi(vports[i], ndlp); + spin_lock_irq(shost->host_lock); + } } spin_unlock_irq(shost->host_lock); } @@ -4770,13 +5052,21 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) (phba->link_state < LPFC_LINK_UP)) return; - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); + /* This is considered as the initial FCF discovery scan */ + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag |= FCF_INIT_DISC; + spin_unlock_irq(&phba->hbalock); + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); - if (rc) + if (rc) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_INIT_DISC; + spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, "2553 lpfc_unregister_unused_fcf failed " "to read FCF record HBA state x%x\n", phba->pport->port_state); + } } /** diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index ff45e336917a..ea44239eeb33 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -2201,8 +2201,8 @@ __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) { /* Clear pending FCF rediscovery wait and failover in progress flags */ phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND | - FCF_DEAD_FOVER | - FCF_CVL_FOVER); + FCF_DEAD_DISC | + FCF_ACVL_DISC); /* Now, try to stop the timer */ del_timer(&phba->fcf.redisc_wait); } @@ -2943,6 +2943,9 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) /* FCF rediscovery event to worker thread */ phba->fcf.fcf_flag |= FCF_REDISC_EVT; spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2776 FCF rediscover wait timer expired, post " + "a worker thread event for FCF table scan\n"); /* wake up worker thread */ lpfc_worker_wake_up(phba); } @@ -3300,10 +3303,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, switch (event_type) { case LPFC_FCOE_EVENT_TYPE_NEW_FCF: case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2546 New FCF found index 0x%x tag 0x%x\n", - acqe_fcoe->index, - acqe_fcoe->event_tag); + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, + "2546 New FCF found/FCF parameter modified event: " + "evt_tag:x%x, fcf_index:x%x\n", + acqe_fcoe->event_tag, acqe_fcoe->index); + spin_lock_irq(&phba->hbalock); if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) || (phba->hba_flag & FCF_DISC_INPROGRESS)) { @@ -3314,6 +3318,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, spin_unlock_irq(&phba->hbalock); break; } + if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { /* * If fast FCF 
failover rescan event is pending, @@ -3324,12 +3329,33 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, } spin_unlock_irq(&phba->hbalock); - /* Read the FCF table and re-discover SAN. */ - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); + if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && + !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { + /* + * During period of FCF discovery, read the FCF + * table record indexed by the event to update + * FCF round robin failover eligible FCF bmask. + */ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | + LOG_DISCOVERY, + "2779 Read new FCF record with " + "fcf_index:x%x for updating FCF " + "round robin failover bmask\n", + acqe_fcoe->index); + rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); + } + + /* Otherwise, scan the entire FCF table and re-discover SAN */ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2770 Start FCF table scan due to new FCF " + "event: evt_tag:x%x, fcf_index:x%x\n", + acqe_fcoe->event_tag, acqe_fcoe->index); + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, + LPFC_FCOE_FCF_GET_FIRST); if (rc) - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2547 Read FCF record failed 0x%x\n", - rc); + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, + "2547 Issue FCF scan read FCF mailbox " + "command failed 0x%x\n", rc); break; case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: @@ -3340,7 +3366,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, break; case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, "2549 FCF disconnected from network index 0x%x" " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); @@ -3349,21 +3375,32 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, break; /* We request port to rediscover the entire FCF table for * a fast recovery from case that the current FCF record - * is no longer valid if the last CVL event hasn't already - * triggered process. + * is no longer valid if we are not in the middle of FCF + * failover process already. 
*/ spin_lock_irq(&phba->hbalock); - if (phba->fcf.fcf_flag & FCF_CVL_FOVER) { + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { spin_unlock_irq(&phba->hbalock); + /* Update FLOGI FCF failover eligible FCF bmask */ + lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index); break; } /* Mark the fast failover process in progress */ - phba->fcf.fcf_flag |= FCF_DEAD_FOVER; + phba->fcf.fcf_flag |= FCF_DEAD_DISC; spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2771 Start FCF fast failover process due to " + "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " + "\n", acqe_fcoe->event_tag, acqe_fcoe->index); rc = lpfc_sli4_redisc_fcf_table(phba); if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | + LOG_DISCOVERY, + "2772 Issue FCF rediscover mabilbox " + "command failed, fail through to FCF " + "dead event\n"); spin_lock_irq(&phba->hbalock); - phba->fcf.fcf_flag &= ~FCF_DEAD_FOVER; + phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; spin_unlock_irq(&phba->hbalock); /* * Last resort will fail over by treating this @@ -3378,7 +3415,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, lpfc_sli4_perform_all_vport_cvl(phba); break; case LPFC_FCOE_EVENT_TYPE_CVL: - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, "2718 Clear Virtual Link Received for VPI 0x%x" " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); vport = lpfc_find_vport_by_vpid(phba, @@ -3419,21 +3456,31 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, * Otherwise, we request port to rediscover * the entire FCF table for a fast recovery * from possible case that the current FCF - * is no longer valid if the FCF_DEAD event - * hasn't already triggered process. + * is no longer valid if we are not already + * in the FCF failover process. 
*/ spin_lock_irq(&phba->hbalock); - if (phba->fcf.fcf_flag & FCF_DEAD_FOVER) { + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { spin_unlock_irq(&phba->hbalock); break; } /* Mark the fast failover process in progress */ - phba->fcf.fcf_flag |= FCF_CVL_FOVER; + phba->fcf.fcf_flag |= FCF_ACVL_DISC; spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | + LOG_DISCOVERY, + "2773 Start FCF fast failover due " + "to CVL event: evt_tag:x%x\n", + acqe_fcoe->event_tag); rc = lpfc_sli4_redisc_fcf_table(phba); if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | + LOG_DISCOVERY, + "2774 Issue FCF rediscover " + "mabilbox command failed, " + "through to CVL event\n"); spin_lock_irq(&phba->hbalock); - phba->fcf.fcf_flag &= ~FCF_CVL_FOVER; + phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; spin_unlock_irq(&phba->hbalock); /* * Last resort will be re-try on the @@ -3537,11 +3584,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); /* Scan FCF table from the first entry to re-discover SAN */ - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2777 Start FCF table scan after FCF " + "rediscovery quiescent period over\n"); + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2747 Post FCF rediscovery read FCF record " - "failed 0x%x\n", rc); + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, + "2747 Issue FCF scan read FCF mailbox " + "command failed 0x%x\n", rc); } /** @@ -3833,6 +3883,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; struct lpfc_mqe *mqe; + int longs; /* Before proceed, wait for POST done and device ready */ rc = lpfc_sli4_post_status_check(phba); @@ -4009,13 +4060,24 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_active_sgl; } + /* Allocate eligible FCF bmask memory for FCF round robin failover */ + longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; + phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), + GFP_KERNEL); + if (!phba->fcf.fcf_rr_bmask) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2759 Failed allocate memory for FCF round " + "robin failover bmask\n"); + goto out_remove_rpi_hdrs; + } + phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * phba->cfg_fcp_eq_count), GFP_KERNEL); if (!phba->sli4_hba.fcp_eq_hdl) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2572 Failed allocate memory for fast-path " "per-EQ handle array\n"); - goto out_remove_rpi_hdrs; + goto out_free_fcf_rr_bmask; } phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * @@ -4068,6 +4130,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) out_free_fcp_eq_hdl: kfree(phba->sli4_hba.fcp_eq_hdl); +out_free_fcf_rr_bmask: + kfree(phba->fcf.fcf_rr_bmask); out_remove_rpi_hdrs: lpfc_sli4_remove_rpi_hdrs(phba); out_free_active_sgl: @@ -4113,6 +4177,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) lpfc_sli4_remove_rpi_hdrs(phba); lpfc_sli4_remove_rpis(phba); + /* Free eligible FCF index bmask */ + kfree(phba->fcf.fcf_rr_bmask); + /* Free the ELS sgl list */ lpfc_free_active_sgl(phba); lpfc_free_sgl_list(phba); diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index 954ba57970a3..bb59e9273126 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ 
b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -35,6 +35,7 @@ #define LOG_VPORT 0x00004000 /* NPIV events */ #define LOF_SECURITY 0x00008000 /* Security events */ #define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ +#define LOG_FIP 0x00020000 /* FIP events */ #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 6c4dce1a30ca..1e61ae3bc4eb 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1748,7 +1748,7 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox) } /** - * lpfc_sli4_mbx_read_fcf_record - Allocate and construct read fcf mbox cmd + * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd * @phba: pointer to lpfc hba data structure. * @fcf_index: index to fcf table. * @@ -1759,9 +1759,9 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox) * NULL. **/ int -lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *phba, - struct lpfcMboxq *mboxq, - uint16_t fcf_index) +lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba, + struct lpfcMboxq *mboxq, + uint16_t fcf_index) { void *virt_addr; dma_addr_t phys_addr; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index bb6a4426d469..fe6660ca6452 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -11996,15 +11996,19 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, } /** - * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record. + * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. * @phba: pointer to lpfc hba data structure. * @fcf_index: FCF table entry offset. * - * This routine is invoked to read up to @fcf_num of FCF record from the - * device starting with the given @fcf_index. + * This routine is invoked to scan the entire FCF table by reading FCF + * record and processing it one at a time starting from the @fcf_index + * for initial FCF discovery or fast FCF failover rediscovery. + * + * Return 0 if the mailbox command is submitted sucessfully, none 0 + * otherwise. 
**/ int -lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) +lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) { int rc = 0, error; LPFC_MBOXQ_t *mboxq; @@ -12016,17 +12020,17 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) "2000 Failed to allocate mbox for " "READ_FCF cmd\n"); error = -ENOMEM; - goto fail_fcfscan; + goto fail_fcf_scan; } /* Construct the read FCF record mailbox command */ - rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index); + rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); if (rc) { error = -EINVAL; - goto fail_fcfscan; + goto fail_fcf_scan; } /* Issue the mailbox command asynchronously */ mboxq->vport = phba->pport; - mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) error = -EIO; @@ -12034,9 +12038,13 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) spin_lock_irq(&phba->hbalock); phba->hba_flag |= FCF_DISC_INPROGRESS; spin_unlock_irq(&phba->hbalock); + /* Reset FCF round robin index bmask for new scan */ + if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) + memset(phba->fcf.fcf_rr_bmask, 0, + sizeof(*phba->fcf.fcf_rr_bmask)); error = 0; } -fail_fcfscan: +fail_fcf_scan: if (error) { if (mboxq) lpfc_sli4_mbox_cmd_free(phba, mboxq); @@ -12048,6 +12056,181 @@ fail_fcfscan: return error; } +/** + * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf. + * @phba: pointer to lpfc hba data structure. + * @fcf_index: FCF table entry offset. + * + * This routine is invoked to read an FCF record indicated by @fcf_index + * and to use it for FLOGI round robin FCF failover. + * + * Return 0 if the mailbox command is submitted sucessfully, none 0 + * otherwise. + **/ +int +lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) +{ + int rc = 0, error; + LPFC_MBOXQ_t *mboxq; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, + "2763 Failed to allocate mbox for " + "READ_FCF cmd\n"); + error = -ENOMEM; + goto fail_fcf_read; + } + /* Construct the read FCF record mailbox command */ + rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); + if (rc) { + error = -EINVAL; + goto fail_fcf_read; + } + /* Issue the mailbox command asynchronously */ + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + error = -EIO; + else + error = 0; + +fail_fcf_read: + if (error && mboxq) + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return error; +} + +/** + * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. + * @phba: pointer to lpfc hba data structure. + * @fcf_index: FCF table entry offset. + * + * This routine is invoked to read an FCF record indicated by @fcf_index to + * determine whether it's eligible for FLOGI round robin failover list. + * + * Return 0 if the mailbox command is submitted sucessfully, none 0 + * otherwise. 
+ **/ +int +lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) +{ + int rc = 0, error; + LPFC_MBOXQ_t *mboxq; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, + "2758 Failed to allocate mbox for " + "READ_FCF cmd\n"); + error = -ENOMEM; + goto fail_fcf_read; + } + /* Construct the read FCF record mailbox command */ + rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); + if (rc) { + error = -EINVAL; + goto fail_fcf_read; + } + /* Issue the mailbox command asynchronously */ + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + error = -EIO; + else + error = 0; + +fail_fcf_read: + if (error && mboxq) + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return error; +} + +/** + * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index + * @phba: pointer to lpfc hba data structure. + * + * This routine is to get the next eligible FCF record index in a round + * robin fashion. If the next eligible FCF record index equals to the + * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) + * shall be returned, otherwise, the next eligible FCF record's index + * shall be returned. + **/ +uint16_t +lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) +{ + uint16_t next_fcf_index; + + /* Search from the currently registered FCF index */ + next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, + LPFC_SLI4_FCF_TBL_INDX_MAX, + phba->fcf.current_rec.fcf_indx); + /* Wrap around condition on phba->fcf.fcf_rr_bmask */ + if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) + next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, + LPFC_SLI4_FCF_TBL_INDX_MAX, 0); + /* Round robin failover stop condition */ + if (next_fcf_index == phba->fcf.fcf_rr_init_indx) + return LPFC_FCOE_FCF_NEXT_NONE; + + return next_fcf_index; +} + +/** + * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index + * @phba: pointer to lpfc hba data structure. + * + * This routine sets the FCF record index in to the eligible bmask for + * round robin failover search. It checks to make sure that the index + * does not go beyond the range of the driver allocated bmask dimension + * before setting the bit. + * + * Returns 0 if the index bit successfully set, otherwise, it returns + * -EINVAL. + **/ +int +lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) +{ + if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + "2610 HBA FCF index reached driver's " + "book keeping dimension: fcf_index:%d, " + "driver_bmask_max:%d\n", + fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); + return -EINVAL; + } + /* Set the eligible FCF record index bmask */ + set_bit(fcf_index, phba->fcf.fcf_rr_bmask); + + return 0; +} + +/** + * lpfc_sli4_fcf_rr_index_set - Clear bmask from eligible fcf record index + * @phba: pointer to lpfc hba data structure. + * + * This routine clears the FCF record index from the eligible bmask for + * round robin failover search. It checks to make sure that the index + * does not go beyond the range of the driver allocated bmask dimension + * before clearing the bit. 
+ **/ +void +lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) +{ + if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + "2762 HBA FCF index goes beyond driver's " + "book keeping dimension: fcf_index:%d, " + "driver_bmask_max:%d\n", + fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); + return; + } + /* Clear the eligible FCF record index bmask */ + clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); +} + /** * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table * @phba: pointer to lpfc hba data structure. @@ -12069,13 +12252,13 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &redisc_fcf->header.cfg_shdr.response); if (shdr_status || shdr_add_status) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2746 Requesting for FCF rediscovery failed " "status x%x add_status x%x\n", shdr_status, shdr_add_status); - if (phba->fcf.fcf_flag & FCF_CVL_FOVER) { + if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { spin_lock_irq(&phba->hbalock); - phba->fcf.fcf_flag &= ~FCF_CVL_FOVER; + phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; spin_unlock_irq(&phba->hbalock); /* * CVL event triggered FCF rediscover request failed, @@ -12084,7 +12267,7 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) lpfc_retry_pport_discovery(phba); } else { spin_lock_irq(&phba->hbalock); - phba->fcf.fcf_flag &= ~FCF_DEAD_FOVER; + phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; spin_unlock_irq(&phba->hbalock); /* * DEAD FCF event triggered FCF rediscover request @@ -12093,12 +12276,16 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) */ lpfc_sli4_fcf_dead_failthrough(phba); } - } else + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2775 Start FCF rediscovery quiescent period " + "wait timer before scaning FCF table\n"); /* * Start FCF rediscovery wait timer for pending FCF * before rescan FCF record table. 
*/ lpfc_fcf_redisc_wait_start_timer(phba); + } mempool_free(mbox, phba->mbox_mem_pool); } @@ -12117,6 +12304,9 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; int rc, length; + /* Cancel retry delay timers to all vports before FCF rediscover */ + lpfc_cancel_all_vport_retry_delay_timer(phba); + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 2169cd24d90c..4a35e7b9bc5b 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -153,17 +153,27 @@ struct lpfc_fcf { #define FCF_REGISTERED 0x02 /* FCF registered with FW */ #define FCF_SCAN_DONE 0x04 /* FCF table scan done */ #define FCF_IN_USE 0x08 /* Atleast one discovery completed */ -#define FCF_DEAD_FOVER 0x10 /* FCF DEAD triggered fast FCF failover */ -#define FCF_CVL_FOVER 0x20 /* CVL triggered fast FCF failover */ -#define FCF_REDISC_PEND 0x40 /* FCF rediscovery pending */ -#define FCF_REDISC_EVT 0x80 /* FCF rediscovery event to worker thread */ -#define FCF_REDISC_FOV 0x100 /* Post FCF rediscovery fast failover */ +#define FCF_INIT_DISC 0x10 /* Initial FCF discovery */ +#define FCF_DEAD_DISC 0x20 /* FCF DEAD fast FCF failover discovery */ +#define FCF_ACVL_DISC 0x40 /* All CVL fast FCF failover discovery */ +#define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC) +#define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */ +#define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ +#define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ uint32_t addr_mode; + uint16_t fcf_rr_init_indx; struct lpfc_fcf_rec current_rec; struct lpfc_fcf_rec failover_rec; struct timer_list redisc_wait; + unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */ }; +/* + * Maximum FCF table index, it is for driver internal book keeping, it + * just needs to be no less than the supported HBA's FCF table size. 
+ */ +#define LPFC_SLI4_FCF_TBL_INDX_MAX 32 + #define LPFC_REGION23_SIGNATURE "RG23" #define LPFC_REGION23_VERSION 1 #define LPFC_REGION23_LAST_REC 0xff @@ -472,8 +482,8 @@ void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *); void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, struct lpfc_mbx_sge *); -int lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *, struct lpfcMboxq *, - uint16_t); +int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *, + uint16_t); void lpfc_sli4_hba_reset(struct lpfc_hba *); struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, @@ -532,8 +542,13 @@ int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t); uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); -int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t); -void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t); +int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t); +int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t); +void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_sli4_unregister_fcf(struct lpfc_hba *); int lpfc_sli4_post_status_check(struct lpfc_hba *); uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *); -- cgit v1.2.3 From 74315ad00b8ed41e9f97fe322942fa9883517ed1 Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 26 Feb 2010 14:16:25 -0500 Subject: [SCSI] lpfc 8.3.10: Update Driver version to 8.3.10 Signed-off-by: James Smart Signed-off-by: James Bottomley --- drivers/scsi/lpfc/lpfc_version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index ac276aa46fba..013deec5dae8 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.9" +#define LPFC_DRIVER_VERSION "8.3.10" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" -- cgit v1.2.3 From bb2d3de1885cd69a5fc92af99c4e0c05eb5fc122 Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Tue, 2 Mar 2010 08:44:34 -0500 Subject: [SCSI] sd: Fix VPD buffer allocations Commit e3deec09 incorrectly assumed that the B0 and B1 page lengths were limited to 32 bytes. The B0 VPD page length is defined to be 64 bytes when the device supports thin provisioning. B1 is always defined to be 64 bytes. Signed-off-by: Martin K. 
Petersen Reported-by: Dan Carpenter Signed-off-by: James Bottomley --- drivers/scsi/sd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 1dd4d8407694..f9995388a574 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1948,7 +1948,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) { struct request_queue *q = sdkp->disk->queue; unsigned int sector_sz = sdkp->device->sector_size; - const int vpd_len = 32; + const int vpd_len = 64; unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); if (!buffer || @@ -1998,7 +1998,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) { unsigned char *buffer; u16 rot; - const int vpd_len = 32; + const int vpd_len = 64; buffer = kmalloc(vpd_len, GFP_KERNEL); -- cgit v1.2.3 From 98e1e0f07c3f1820b8ac424569ee9e9916d3665b Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Fri, 19 Feb 2010 11:46:24 -0800 Subject: [SCSI] libosd: Fix unchecked err return found by smatch Doing CHECK="smatch --two-passes gives: drivers/scsi/osd/osd_initiator.c +1435 osd_finalize_request warning: assignment to 'ret' was never used Which is an unchecked possible allocation failure, Fixed. Reported-by: Dan Carpenter Signed-off-by: Boaz Harrosh Signed-off-by: James Bottomley --- drivers/scsi/osd/osd_initiator.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 24223473f573..60de85091502 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -1433,6 +1433,10 @@ int osd_finalize_request(struct osd_request *or, cdbh->command_specific_options |= or->attributes_mode; if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) { ret = _osd_req_finalize_attr_page(or); + if (ret) { + OSD_DEBUG("_osd_req_finalize_attr_page failed\n"); + return ret; + } } else { /* TODO: I think that for the GET_ATTR command these 2 should * be reversed to keep them in execution order (for embeded -- cgit v1.2.3 From fac829fdcaf451a20106cbc468ff886466320956 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Wed, 3 Mar 2010 11:06:56 +0530 Subject: [SCSI] raid_attrs: fix dependency problems RAID attributes uses scsi_is_sdev_device() to gate some SCSI specific checking code. This causes two problems. Firstly if SCSI == n just defining scsi_is_sdev_device() to return false might not be enough to prevent gcc from emitting the code (and thus referring to undefined symbols), so this needs surrounding with an ifdef. Secondly, using scsi_is_sdev_device() when SCSI is either y or m gives a subtle problem in the m case: raid_attrs must also be m to use the symbol. Do the usual Kconfig jiggery-pokery to fix this. 
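
The Kconfig idiom used for this fix is worth spelling out in general form: an
invisible helper symbol mirrors the tristate of the optional subsystem, so any
option that depends on the helper can never be built-in while the subsystem is
modular. The sketch below uses placeholder names FOO and BAR purely for
illustration; the concrete instance (SCSI_MOD gating RAID_ATTRS) is in the
Kconfig hunk that follows.

    config FOO_HELPER
            tristate
            default y if BAR=n || BAR=y
            default m if BAR=m

    config FOO
            tristate "Foo feature"
            depends on FOO_HELPER

With this pattern, FOO=y is only selectable when BAR is y or entirely absent;
if BAR=m, FOO is forced to m as well, so FOO's references to BAR's exported
symbols resolve at module load time instead of failing at link time.
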
Reported-by: Randy Dunlap Signed-off-by: James Bottomley --- drivers/scsi/Kconfig | 6 ++++++ drivers/scsi/raid_class.c | 2 ++ 2 files changed, 8 insertions(+) diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 9191d1ea6451..75f2336807cb 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1,9 +1,15 @@ menu "SCSI device support" +config SCSI_MOD + tristate + default y if SCSI=n || SCSI=y + default m if SCSI=m + config RAID_ATTRS tristate "RAID Transport Class" default n depends on BLOCK + depends on SCSI_MOD ---help--- Provides RAID diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c index bd88349b8526..2c146b44d95f 100644 --- a/drivers/scsi/raid_class.c +++ b/drivers/scsi/raid_class.c @@ -63,6 +63,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev) * emulated RAID devices, so start with SCSI */ struct raid_internal *i = ac_to_raid_internal(cont); +#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE) if (scsi_is_sdev_device(dev)) { struct scsi_device *sdev = to_scsi_device(dev); @@ -71,6 +72,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev) return i->f->is_raid(dev); } +#endif /* FIXME: look at other subsystems too */ return 0; } -- cgit v1.2.3 From 4c147dd81966bd4ba7f026476237ba67ea72d5d9 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Wed, 3 Mar 2010 17:42:11 -0800 Subject: [SCSI] bfa: Added separate MSI-X module parameters. Added separate MSI-X module parameters to selectively enable / disable MSI-X interrupts for both Brocade HBA and CNA's. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfad_attr.c | 3 +++ drivers/scsi/bfa/bfad_intr.c | 11 ++++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index 9129ae3040ff..adf801dbfa15 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -229,7 +229,9 @@ bfad_im_get_host_speed(struct Scsi_Host *shost) (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; struct bfa_pport_attr_s attr; + unsigned long flags; + spin_lock_irqsave(shost->host_lock, flags); bfa_pport_get_attr(&bfad->bfa, &attr); switch (attr.speed) { case BFA_PPORT_SPEED_8GBPS: @@ -248,6 +250,7 @@ bfad_im_get_host_speed(struct Scsi_Host *shost) fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } + spin_unlock_irqrestore(shost->host_lock, flags); } /** diff --git a/drivers/scsi/bfa/bfad_intr.c b/drivers/scsi/bfa/bfad_intr.c index 7de8832f6fee..2b7dbecbebca 100644 --- a/drivers/scsi/bfa/bfad_intr.c +++ b/drivers/scsi/bfa/bfad_intr.c @@ -23,8 +23,10 @@ BFA_TRC_FILE(LDRV, INTR); /** * bfa_isr BFA driver interrupt functions */ -static int msix_disable; -module_param(msix_disable, int, S_IRUGO | S_IWUSR); +static int msix_disable_cb; +static int msix_disable_ct; +module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR); +module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR); /** * Line based interrupt handler. */ @@ -141,6 +143,7 @@ bfad_setup_intr(struct bfad_s *bfad) int error = 0; u32 mask = 0, i, num_bit = 0, max_bit = 0; struct msix_entry msix_entries[MAX_MSIX_ENTRY]; + struct pci_dev *pdev = bfad->pcidev; /* Call BFA to get the msix map for this PCI function. 
*/ bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit); @@ -148,7 +151,9 @@ bfad_setup_intr(struct bfad_s *bfad) /* Set up the msix entry table */ bfad_init_msix_entry(bfad, msix_entries, mask, max_bit); - if (!msix_disable) { + if ((pdev->device == BFA_PCI_DEVICE_ID_CT && !msix_disable_ct) || + (pdev->device != BFA_PCI_DEVICE_ID_CT && !msix_disable_cb)) { + error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec); if (error) { /* -- cgit v1.2.3 From 5c1fb1d55672a74d1c318f67cdddbb599df9a76c Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Wed, 3 Mar 2010 17:42:39 -0800 Subject: [SCSI] bfa: Defined a new LPS event to clear virtual link on a vport Clear virtual links was not propagated upwards to bfa from fw. This resulted in HBA and switch being in an inconsistent state. So defined a new LPS event for clear virtual link on a vport, and also now clear virtual link on a baseport, is sent as a link down event from the fw. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_lps.c | 71 +++++++++++++++++++++++++++++++++- drivers/scsi/bfa/include/bfa_svc.h | 1 + drivers/scsi/bfa/include/bfi/bfi_lps.h | 8 ++++ drivers/scsi/bfa/include/cs/bfa_plog.h | 9 ++++- drivers/scsi/bfa/vport.c | 11 ++++++ 5 files changed, 97 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/bfa/bfa_lps.c b/drivers/scsi/bfa/bfa_lps.c index 9844b45412b6..c8c2564af725 100644 --- a/drivers/scsi/bfa/bfa_lps.c +++ b/drivers/scsi/bfa/bfa_lps.c @@ -49,7 +49,7 @@ static void bfa_lps_send_login(struct bfa_lps_s *lps); static void bfa_lps_send_logout(struct bfa_lps_s *lps); static void bfa_lps_login_comp(struct bfa_lps_s *lps); static void bfa_lps_logout_comp(struct bfa_lps_s *lps); - +static void bfa_lps_cvl_event(struct bfa_lps_s *lps); /** * lps_pvt BFA LPS private functions @@ -62,6 +62,7 @@ enum bfa_lps_event { BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */ BFA_LPS_SM_DELETE = 5, /* lps delete from user */ BFA_LPS_SM_OFFLINE = 6, /* Link is offline */ + BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */ }; static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); @@ -101,6 +102,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event) bfa_lps_free(lps); break; + case BFA_LPS_SM_RX_CVL: case BFA_LPS_SM_OFFLINE: break; @@ -162,6 +164,14 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event) bfa_reqq_wcancel(&lps->wqe); break; + case BFA_LPS_SM_RX_CVL: + /* + * Login was not even sent out; so when getting out + * of this state, it will appear like a login retry + * after Clear virtual link + */ + break; + default: bfa_assert(0); } @@ -187,6 +197,15 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event) } break; + case BFA_LPS_SM_RX_CVL: + bfa_sm_set_state(lps, bfa_lps_sm_init); + + /* Let the vport module know about this event */ + bfa_lps_cvl_event(lps); + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. 
Link Rx"); + break; + case BFA_LPS_SM_OFFLINE: case BFA_LPS_SM_DELETE: bfa_sm_set_state(lps, bfa_lps_sm_init); @@ -395,6 +414,20 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp) bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); } +/** + * Firmware received a Clear virtual link request (for FCoE) + */ +static void +bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl) +{ + struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); + struct bfa_lps_s *lps; + + lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag); + + bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); +} + /** * Space is available in request queue, resume queueing request to firmware. */ @@ -531,7 +564,39 @@ bfa_lps_logout_comp(struct bfa_lps_s *lps) bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg); } +/** + * Clear virtual link completion handler for non-fcs + */ +static void +bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete) +{ + struct bfa_lps_s *lps = arg; + + if (!complete) + return; + + /* Clear virtual link to base port will result in link down */ + if (lps->fdisc) + bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); +} + +/** + * Received Clear virtual link event --direct call for fcs, + * queue for others + */ +static void +bfa_lps_cvl_event(struct bfa_lps_s *lps) +{ + if (!lps->bfa->fcs) { + bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb, + lps); + return; + } + /* Clear virtual link to base port will result in link down */ + if (lps->fdisc) + bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); +} /** * lps_public BFA LPS public functions @@ -773,6 +838,10 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m) bfa_lps_logout_rsp(bfa, msg.logout_rsp); break; + case BFI_LPS_H2I_CVL_EVENT: + bfa_lps_rx_cvl_event(bfa, msg.cvl_event); + break; + default: bfa_trc(bfa, m->mhdr.msg_id); bfa_assert(0); diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h index 268d956bad89..2e4372f66b8b 100644 --- a/drivers/scsi/bfa/include/bfa_svc.h +++ b/drivers/scsi/bfa/include/bfa_svc.h @@ -319,6 +319,7 @@ void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status); void bfa_cb_lps_flogo_comp(void *bfad, void *uarg); void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status); void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg); +void bfa_cb_lps_cvl_event(void *bfad, void *uarg); #endif /* __BFA_SVC_H__ */ diff --git a/drivers/scsi/bfa/include/bfi/bfi_lps.h b/drivers/scsi/bfa/include/bfi/bfi_lps.h index c59d47badb4b..7ed31bbb8696 100644 --- a/drivers/scsi/bfa/include/bfi/bfi_lps.h +++ b/drivers/scsi/bfa/include/bfi/bfi_lps.h @@ -30,6 +30,7 @@ enum bfi_lps_h2i_msgs { enum bfi_lps_i2h_msgs { BFI_LPS_H2I_LOGIN_RSP = BFA_I2HM(1), BFI_LPS_H2I_LOGOUT_RSP = BFA_I2HM(2), + BFI_LPS_H2I_CVL_EVENT = BFA_I2HM(3), }; struct bfi_lps_login_req_s { @@ -77,6 +78,12 @@ struct bfi_lps_logout_rsp_s { u8 rsvd[2]; }; +struct bfi_lps_cvl_event_s { + struct bfi_mhdr_s mh; /* common msg header */ + u8 lp_tag; + u8 rsvd[3]; +}; + union bfi_lps_h2i_msg_u { struct bfi_mhdr_s *msg; struct bfi_lps_login_req_s *login_req; @@ -87,6 +94,7 @@ union bfi_lps_i2h_msg_u { struct bfi_msg_s *msg; struct bfi_lps_login_rsp_s *login_rsp; struct bfi_lps_logout_rsp_s *logout_rsp; + struct bfi_lps_cvl_event_s *cvl_event; }; #pragma pack() diff --git a/drivers/scsi/bfa/include/cs/bfa_plog.h b/drivers/scsi/bfa/include/cs/bfa_plog.h index 670f86e5fc6e..f5bef63b5877 100644 --- a/drivers/scsi/bfa/include/cs/bfa_plog.h +++ b/drivers/scsi/bfa/include/cs/bfa_plog.h @@ -80,7 +80,8 @@ enum bfa_plog_mid { 
BFA_PL_MID_HAL_FCXP = 4, BFA_PL_MID_HAL_UF = 5, BFA_PL_MID_FCS = 6, - BFA_PL_MID_MAX = 7 + BFA_PL_MID_LPS = 7, + BFA_PL_MID_MAX = 8 }; #define BFA_PL_MID_STRLEN 8 @@ -118,7 +119,11 @@ enum bfa_plog_eid { BFA_PL_EID_RSCN = 17, BFA_PL_EID_DEBUG = 18, BFA_PL_EID_MISC = 19, - BFA_PL_EID_MAX = 20 + BFA_PL_EID_FIP_FCF_DISC = 20, + BFA_PL_EID_FIP_FCF_CVL = 21, + BFA_PL_EID_LOGIN = 22, + BFA_PL_EID_LOGO = 23, + BFA_PL_EID_MAX = 24 }; #define BFA_PL_ENAME_STRLEN 8 diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c index e90f1e38c32d..8d18589e1da2 100644 --- a/drivers/scsi/bfa/vport.c +++ b/drivers/scsi/bfa/vport.c @@ -888,4 +888,15 @@ bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg) bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); } +/** + * Received clear virtual link + */ +void +bfa_cb_lps_cvl_event(void *bfad, void *uarg) +{ + struct bfa_fcs_vport_s *vport = uarg; + /* Send an Offline followed by an ONLINE */ + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); + bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); +} -- cgit v1.2.3 From 2f9b8857a914b71ba1b84fb23a0a20a87de41c91 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Wed, 3 Mar 2010 17:42:51 -0800 Subject: [SCSI] bfa: Enable IOC auto-recovery and IOC type fix. bfa_ioc.c: - Enable IOC auto-recovery by default. - When CNA is in FC mode, return IOC type as FC (not FCoE) bfa_iocfc.c: - Set fcmode before pci initialization/setup. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_ioc.c | 6 +++--- drivers/scsi/bfa/bfa_iocfc.c | 5 +++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 397d7e9eade5..569b35d19a25 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -56,7 +56,7 @@ BFA_TRC_FILE(HAL, IOC); #define BFA_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) #define BFA_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) #define BFA_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) -bfa_boolean_t bfa_auto_recover = BFA_FALSE; +bfa_boolean_t bfa_auto_recover = BFA_TRUE; /* * forward declarations @@ -1642,7 +1642,7 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param) void bfa_ioc_auto_recover(bfa_boolean_t auto_recover) { - bfa_auto_recover = BFA_FALSE; + bfa_auto_recover = auto_recover; } @@ -2082,7 +2082,7 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) ioc_attr->state = bfa_sm_to_state(ioc_sm_table, ioc->fsm); ioc_attr->port_id = ioc->port_id; - if (!ioc->ctdev) + if (!ioc->ctdev || ioc->fcmode) ioc_attr->ioc_type = BFA_IOC_TYPE_FC; else if (ioc->ioc_mc == BFI_MC_IOCFC) ioc_attr->ioc_type = BFA_IOC_TYPE_FCoE; diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c index d7ab792a9e54..b5e7224bda4a 100644 --- a/drivers/scsi/bfa/bfa_iocfc.c +++ b/drivers/scsi/bfa/bfa_iocfc.c @@ -619,8 +619,6 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod, bfa->trcmod, bfa->aen, bfa->logm); - bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC); - bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs); /** * Choose FC (ssid: 0x1C) v/s FCoE (ssid: 0x14) mode. 
@@ -628,6 +626,9 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, if (0) bfa_ioc_set_fcmode(&bfa->ioc); + bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC); + bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs); + bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); bfa_iocfc_mem_claim(bfa, cfg, meminfo); bfa_timer_init(&bfa->timer_mod); -- cgit v1.2.3 From ab5336189a12b6561a1b5708d782a4e27e2e3b79 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Wed, 3 Mar 2010 17:43:09 -0800 Subject: [SCSI] bfa: Enable new halt interrupt in BFA. bfa_intr.c: Enable new halt interrupt in BFA. bfi_ctreg.h: Expose new halt interrupt bit definition to host. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_intr.c | 7 ++++--- drivers/scsi/bfa/include/bfi/bfi_ctreg.h | 1 + 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c index b36540e4ed76..ab463db11144 100644 --- a/drivers/scsi/bfa/bfa_intr.c +++ b/drivers/scsi/bfa/bfa_intr.c @@ -15,7 +15,7 @@ * General Public License for more details. */ #include -#include +#include #include #include #include @@ -96,7 +96,8 @@ bfa_isr_enable(struct bfa_s *bfa) bfa_msix_install(bfa); intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | - __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS); + __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | + __HFN_INT_LL_HALT); if (pci_func == 0) intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | @@ -205,7 +206,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec) if (intr & (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | - __HFN_INT_ERR_PSS)) + __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT)) bfa_msix_errint(bfa, intr); } diff --git a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h index d3caa58c0a0a..dd2992c38afb 100644 --- a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h +++ b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h @@ -589,6 +589,7 @@ enum { #define __HFN_INT_MBOX_LPU1 0x00200000U #define __HFN_INT_MBOX1_LPU0 0x00400000U #define __HFN_INT_MBOX1_LPU1 0x00800000U +#define __HFN_INT_LL_HALT 0x01000000U #define __HFN_INT_CPE_MASK 0x000000ffU #define __HFN_INT_RME_MASK 0x0000ff00U -- cgit v1.2.3 From 5b098082e22c168b7df4c5c3cd924047cee7d995 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Wed, 3 Mar 2010 17:43:19 -0800 Subject: [SCSI] bfa: Changes to support FDMI Driver Parameter Added a FCS function to be called during driver init, to set the FDMI Driver parameter. fdmi.c: Created a disabled state when fdmi is disabled. bfad.c: * Added fdmi_enable driver parameter. * Added support to call bfa_fcs_set_fdmi_param() to initialize fcs fdmi setting. 
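The pattern in this change is a load-time configuration knob that picks the FDMI state machine's
initial state: when disabled, the state machine is parked in a no-op state and can only be enabled
again at the next driver attach. A minimal, hypothetical userspace sketch of that pattern follows;
the names (fdmi_enable, fdmi_attach, the state enum) are illustrative stand-ins, not the driver's
actual symbols.

    /* Illustrative sketch only: a module-style flag selects the FDMI
     * state machine's initial state at attach time.  Not driver code. */
    #include <stdbool.h>
    #include <stdio.h>

    enum fdmi_state { FDMI_SM_OFFLINE, FDMI_SM_DISABLED };

    struct fdmi {
        enum fdmi_state state;
    };

    /* stands in for the fdmi_enable module parameter */
    static bool fdmi_enable = true;

    static void fdmi_attach(struct fdmi *fdmi, bool enabled)
    {
        /* disabled is a terminal, no-op state; it can only change
         * again at the next attach (i.e. a driver reload) */
        fdmi->state = enabled ? FDMI_SM_OFFLINE : FDMI_SM_DISABLED;
    }

    int main(void)
    {
        struct fdmi fdmi;

        fdmi_attach(&fdmi, fdmi_enable);
        printf("fdmi starts %s\n",
               fdmi.state == FDMI_SM_OFFLINE ? "offline (enabled)"
                                             : "disabled");
        return 0;
    }

Making the disabled state a no-op keeps every later FDMI event harmless without sprinkling
enable checks through the rest of the code.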
Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_fcs.c | 17 +++++++++++++++++ drivers/scsi/bfa/bfad.c | 3 +++ drivers/scsi/bfa/fdmi.c | 22 +++++++++++++++++++++- drivers/scsi/bfa/include/fcs/bfa_fcs.h | 2 ++ 4 files changed, 43 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index 7cb39a306ea9..50120c285fff 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c @@ -126,6 +126,23 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, bfa_fcs_fabric_psymb_init(&fcs->fabric); } +/** + * @brief + * FCS FDMI Driver Parameter Initialization + * + * @param[in] fcs FCS instance + * @param[in] fdmi_enable TRUE/FALSE + * + * @return None + */ +void +bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable) +{ + + fcs->fdmi_enabled = fdmi_enable; + +} + /** * FCS instance cleanup and exit. * diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index b52b773d49d9..8e2b2a26cb74 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -53,6 +53,7 @@ static int log_level = BFA_LOG_WARNING; static int ioc_auto_recover = BFA_TRUE; static int ipfc_enable = BFA_FALSE; static int ipfc_mtu = -1; +static int fdmi_enable = BFA_TRUE; int bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH; int bfa_linkup_delay = -1; @@ -74,6 +75,7 @@ module_param(log_level, int, S_IRUGO | S_IWUSR); module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR); module_param(ipfc_enable, int, S_IRUGO | S_IWUSR); module_param(ipfc_mtu, int, S_IRUGO | S_IWUSR); +module_param(fdmi_enable, int, S_IRUGO | S_IWUSR); module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR); /* @@ -748,6 +750,7 @@ bfad_drv_init(struct bfad_s *bfad) bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen); bfa_fcs_init(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); + bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable); spin_unlock_irqrestore(&bfad->bfad_lock, flags); bfad->bfad_flags |= BFAD_DRV_INIT_DONE; diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c index df2a1e54e16b..d76d9220b6e6 100644 --- a/drivers/scsi/bfa/fdmi.c +++ b/drivers/scsi/bfa/fdmi.c @@ -116,6 +116,9 @@ static void bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi, enum port_fdmi_event event); static void bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi, enum port_fdmi_event event); +static void bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi, + enum port_fdmi_event event); + /** * Start in offline state - awaiting MS to send start. */ @@ -479,6 +482,20 @@ bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi, } } +/** + * FDMI is disabled state. + */ +static void +bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi, + enum port_fdmi_event event) +{ + struct bfa_fcs_port_s *port = fdmi->ms->port; + + bfa_trc(port->fcs, port->port_cfg.pwwn); + bfa_trc(port->fcs, event); + + /* No op State. It can only be enabled at Driver Init. */ +} /** * RHBA : Register HBA Attributes. 
@@ -1201,7 +1218,10 @@ bfa_fcs_port_fdmi_init(struct bfa_fcs_port_ms_s *ms) struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi; fdmi->ms = ms; - bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); + if (ms->port->fcs->fdmi_enabled) + bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline); + else + bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_disabled); } void diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs.h b/drivers/scsi/bfa/include/fcs/bfa_fcs.h index 627669c65546..0396ec460532 100644 --- a/drivers/scsi/bfa/include/fcs/bfa_fcs.h +++ b/drivers/scsi/bfa/include/fcs/bfa_fcs.h @@ -49,6 +49,7 @@ struct bfa_fcs_s { struct bfa_trc_mod_s *trcmod; /* tracing module */ struct bfa_aen_s *aen; /* aen component */ bfa_boolean_t vf_enabled; /* VF mode is enabled */ + bfa_boolean_t fdmi_enabled; /*!< FDMI is enabled */ bfa_boolean_t min_cfg; /* min cfg enabled/disabled */ u16 port_vfid; /* port default VF ID */ struct bfa_fcs_driver_info_s driver_info; @@ -64,6 +65,7 @@ void bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, bfa_boolean_t min_cfg); void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, struct bfa_fcs_driver_info_s *driver_info); +void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable); void bfa_fcs_exit(struct bfa_fcs_s *fcs); void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod); void bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod); -- cgit v1.2.3 From 82794a2e4153657d12a0c29272e40b47eaadb748 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Wed, 3 Mar 2010 17:43:30 -0800 Subject: [SCSI] bfa: New interface to handle firmware upgrade scenario Split bfa_fcs_init() into bfa_fcs_attach() and bfa_fcs_init(). Removed empty function definitions in FCS modules Modified driver to call bfa_fcs_attach() and bfa_fcs_init() as needed. 
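The attach/init split described above is the usual two-phase bring-up pattern: allocate and wire
up data structures once at probe time ("attach"), and start the state machines only after the
lower layer reports ready ("init"), so the init step can be deferred, for example until a firmware
mismatch is resolved. A rough, hypothetical sketch of the pattern is below; the module table and
names mirror the idea only and are not the driver's actual code.

    /* Sketch of a two-phase (attach + init) module table. */
    #include <stdio.h>

    struct fcs;                               /* opaque context */

    struct fcs_mod {
        void (*attach)(struct fcs *fcs);      /* probe-time setup */
        void (*init)(struct fcs *fcs);        /* post-HW-init start */
    };

    static void fabric_attach(struct fcs *fcs) { puts("fabric attach"); }
    static void fabric_init(struct fcs *fcs)   { puts("fabric init"); }
    static void pport_attach(struct fcs *fcs)  { puts("pport attach"); }

    static const struct fcs_mod mods[] = {
        { pport_attach,  NULL },              /* attach-only module */
        { fabric_attach, fabric_init },
    };

    static void fcs_attach(struct fcs *fcs)
    {
        for (unsigned i = 0; i < sizeof(mods) / sizeof(mods[0]); i++)
            if (mods[i].attach)
                mods[i].attach(fcs);
    }

    static void fcs_init(struct fcs *fcs)
    {
        for (unsigned i = 0; i < sizeof(mods) / sizeof(mods[0]); i++)
            if (mods[i].init)
                mods[i].init(fcs);
    }

    int main(void)
    {
        fcs_attach(NULL);   /* at driver attach/probe time */
        /* ... hardware init completes, possibly much later ... */
        fcs_init(NULL);
        return 0;
    }

Modules that have nothing to start simply leave the init hook NULL, which is why the empty
modinit/modexit stubs can be removed.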
Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_fcs.c | 46 +++++++++++++++++++++------------- drivers/scsi/bfa/bfa_fcs_port.c | 11 ++------ drivers/scsi/bfa/bfa_fcs_uf.c | 8 +----- drivers/scsi/bfa/bfad.c | 3 ++- drivers/scsi/bfa/fabric.c | 11 +++++--- drivers/scsi/bfa/fcpim.c | 19 -------------- drivers/scsi/bfa/fcs_fabric.h | 1 + drivers/scsi/bfa/fcs_fcpim.h | 5 ---- drivers/scsi/bfa/fcs_port.h | 3 +-- drivers/scsi/bfa/fcs_rport.h | 3 --- drivers/scsi/bfa/fcs_uf.h | 3 +-- drivers/scsi/bfa/fcs_vport.h | 7 ------ drivers/scsi/bfa/include/fcs/bfa_fcs.h | 3 ++- drivers/scsi/bfa/rport.c | 17 ------------- drivers/scsi/bfa/vport.c | 17 ------------- 15 files changed, 47 insertions(+), 110 deletions(-) diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index 50120c285fff..3516172c597c 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c @@ -36,6 +36,7 @@ * FCS sub-modules */ struct bfa_fcs_mod_s { + void (*attach) (struct bfa_fcs_s *fcs); void (*modinit) (struct bfa_fcs_s *fcs); void (*modexit) (struct bfa_fcs_s *fcs); }; @@ -43,12 +44,10 @@ struct bfa_fcs_mod_s { #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit } static struct bfa_fcs_mod_s fcs_modules[] = { - BFA_FCS_MODULE(bfa_fcs_pport), - BFA_FCS_MODULE(bfa_fcs_uf), - BFA_FCS_MODULE(bfa_fcs_fabric), - BFA_FCS_MODULE(bfa_fcs_vport), - BFA_FCS_MODULE(bfa_fcs_rport), - BFA_FCS_MODULE(bfa_fcs_fcpim), + { bfa_fcs_pport_attach, NULL, NULL }, + { bfa_fcs_uf_attach, NULL, NULL }, + { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit, + bfa_fcs_fabric_modexit }, }; /** @@ -71,16 +70,10 @@ bfa_fcs_exit_comp(void *fcs_cbarg) */ /** - * FCS instance initialization. - * - * param[in] fcs FCS instance - * param[in] bfa BFA instance - * param[in] bfad BFA driver instance - * - * return None + * fcs attach -- called once to initialize data structures at driver attach time */ void -bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, +bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, bfa_boolean_t min_cfg) { int i; @@ -95,7 +88,24 @@ bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { mod = &fcs_modules[i]; - mod->modinit(fcs); + if (mod->attach) + mod->attach(fcs); + } +} + +/** + * fcs initialization, called once after bfa initialization is complete + */ +void +bfa_fcs_init(struct bfa_fcs_s *fcs) +{ + int i; + struct bfa_fcs_mod_s *mod; + + for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { + mod = &fcs_modules[i]; + if (mod->modinit) + mod->modinit(fcs); } } @@ -160,10 +170,12 @@ bfa_fcs_exit(struct bfa_fcs_s *fcs) nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]); for (i = 0; i < nmods; i++) { - bfa_wc_up(&fcs->wc); mod = &fcs_modules[i]; - mod->modexit(fcs); + if (mod->modexit) { + bfa_wc_up(&fcs->wc); + mod->modexit(fcs); + } } bfa_wc_wait(&fcs->wc); diff --git a/drivers/scsi/bfa/bfa_fcs_port.c b/drivers/scsi/bfa/bfa_fcs_port.c index 9c4b24e62de1..53808d0418a1 100644 --- a/drivers/scsi/bfa/bfa_fcs_port.c +++ b/drivers/scsi/bfa/bfa_fcs_port.c @@ -55,14 +55,7 @@ bfa_fcs_pport_event_handler(void *cbarg, bfa_pport_event_t event) } void -bfa_fcs_pport_modinit(struct bfa_fcs_s *fcs) +bfa_fcs_pport_attach(struct bfa_fcs_s *fcs) { - bfa_pport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, - fcs); -} - -void -bfa_fcs_pport_modexit(struct bfa_fcs_s *fcs) -{ - bfa_fcs_modexit_comp(fcs); + 
bfa_pport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, fcs); } diff --git a/drivers/scsi/bfa/bfa_fcs_uf.c b/drivers/scsi/bfa/bfa_fcs_uf.c index ad01db6444b2..3d57d48bbae4 100644 --- a/drivers/scsi/bfa/bfa_fcs_uf.c +++ b/drivers/scsi/bfa/bfa_fcs_uf.c @@ -93,13 +93,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf) } void -bfa_fcs_uf_modinit(struct bfa_fcs_s *fcs) +bfa_fcs_uf_attach(struct bfa_fcs_s *fcs) { bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs); } - -void -bfa_fcs_uf_modexit(struct bfa_fcs_s *fcs) -{ - bfa_fcs_modexit_comp(fcs); -} diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 8e2b2a26cb74..965dfb575e5a 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -748,7 +748,8 @@ bfad_drv_init(struct bfad_s *bfad) bfa_fcs_log_init(&bfad->bfa_fcs, bfad->logmod); bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod); bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen); - bfa_fcs_init(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); + bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); + bfa_fcs_init(&bfad->bfa_fcs); bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable); spin_unlock_irqrestore(&bfad->bfad_lock, flags); diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c index a4b5dd449573..e22989886646 100644 --- a/drivers/scsi/bfa/fabric.c +++ b/drivers/scsi/bfa/fabric.c @@ -814,10 +814,10 @@ bfa_fcs_fabric_delete_comp(void *cbarg) */ /** - * Module initialization + * Attach time initialization */ void -bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs) +bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs) { struct bfa_fcs_fabric_s *fabric; @@ -841,7 +841,12 @@ bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs) bfa_wc_up(&fabric->wc); /* For the base port */ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); - bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CREATE); +} + +void +bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs) +{ + bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE); bfa_trc(fcs, 0); } diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/fcpim.c index 1f3c06efaa9e..06f8a46d1977 100644 --- a/drivers/scsi/bfa/fcpim.c +++ b/drivers/scsi/bfa/fcpim.c @@ -822,22 +822,3 @@ void bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim) { } - -/** - * Module initialization - */ -void -bfa_fcs_fcpim_modinit(struct bfa_fcs_s *fcs) -{ -} - -/** - * Module cleanup - */ -void -bfa_fcs_fcpim_modexit(struct bfa_fcs_s *fcs) -{ - bfa_fcs_modexit_comp(fcs); -} - - diff --git a/drivers/scsi/bfa/fcs_fabric.h b/drivers/scsi/bfa/fcs_fabric.h index eee960820f86..8237bd5e7217 100644 --- a/drivers/scsi/bfa/fcs_fabric.h +++ b/drivers/scsi/bfa/fcs_fabric.h @@ -29,6 +29,7 @@ /* * fcs friend functions: only between fcs modules */ +void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs); void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs); void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs); void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs); diff --git a/drivers/scsi/bfa/fcs_fcpim.h b/drivers/scsi/bfa/fcs_fcpim.h index 61e9e2687de3..11e6e7bce9f6 100644 --- a/drivers/scsi/bfa/fcs_fcpim.h +++ b/drivers/scsi/bfa/fcs_fcpim.h @@ -34,11 +34,6 @@ void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim); void bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim); void bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim); -/* - * Modudle init/cleanup routines. 
- */ -void bfa_fcs_fcpim_modinit(struct bfa_fcs_s *fcs); -void bfa_fcs_fcpim_modexit(struct bfa_fcs_s *fcs); void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs, u16 len); #endif /* __FCS_FCPIM_H__ */ diff --git a/drivers/scsi/bfa/fcs_port.h b/drivers/scsi/bfa/fcs_port.h index abb65191dd27..408c06a7d164 100644 --- a/drivers/scsi/bfa/fcs_port.h +++ b/drivers/scsi/bfa/fcs_port.h @@ -26,7 +26,6 @@ /* * fcs friend functions: only between fcs modules */ -void bfa_fcs_pport_modinit(struct bfa_fcs_s *fcs); -void bfa_fcs_pport_modexit(struct bfa_fcs_s *fcs); +void bfa_fcs_pport_attach(struct bfa_fcs_s *fcs); #endif /* __FCS_PPORT_H__ */ diff --git a/drivers/scsi/bfa/fcs_rport.h b/drivers/scsi/bfa/fcs_rport.h index f601e9d74236..9c8d1d292380 100644 --- a/drivers/scsi/bfa/fcs_rport.h +++ b/drivers/scsi/bfa/fcs_rport.h @@ -24,9 +24,6 @@ #include -void bfa_fcs_rport_modinit(struct bfa_fcs_s *fcs); -void bfa_fcs_rport_modexit(struct bfa_fcs_s *fcs); - void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, u16 len); void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport); diff --git a/drivers/scsi/bfa/fcs_uf.h b/drivers/scsi/bfa/fcs_uf.h index 96f1bdcb31ed..f591072214fe 100644 --- a/drivers/scsi/bfa/fcs_uf.h +++ b/drivers/scsi/bfa/fcs_uf.h @@ -26,7 +26,6 @@ /* * fcs friend functions: only between fcs modules */ -void bfa_fcs_uf_modinit(struct bfa_fcs_s *fcs); -void bfa_fcs_uf_modexit(struct bfa_fcs_s *fcs); +void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs); #endif /* __FCS_UF_H__ */ diff --git a/drivers/scsi/bfa/fcs_vport.h b/drivers/scsi/bfa/fcs_vport.h index 9e80b6a97b7f..32565ba666eb 100644 --- a/drivers/scsi/bfa/fcs_vport.h +++ b/drivers/scsi/bfa/fcs_vport.h @@ -22,13 +22,6 @@ #include #include -/* - * Modudle init/cleanup routines. - */ - -void bfa_fcs_vport_modinit(struct bfa_fcs_s *fcs); -void bfa_fcs_vport_modexit(struct bfa_fcs_s *fcs); - void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport); diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs.h b/drivers/scsi/bfa/include/fcs/bfa_fcs.h index 0396ec460532..f2fd35fdee28 100644 --- a/drivers/scsi/bfa/include/fcs/bfa_fcs.h +++ b/drivers/scsi/bfa/include/fcs/bfa_fcs.h @@ -61,8 +61,9 @@ struct bfa_fcs_s { /* * bfa fcs API functions */ -void bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, +void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, bfa_boolean_t min_cfg); +void bfa_fcs_init(struct bfa_fcs_s *fcs); void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, struct bfa_fcs_driver_info_s *driver_info); void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable); diff --git a/drivers/scsi/bfa/rport.c b/drivers/scsi/bfa/rport.c index 9cf58bb138dc..df714dcdf031 100644 --- a/drivers/scsi/bfa/rport.c +++ b/drivers/scsi/bfa/rport.c @@ -2574,23 +2574,6 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); } -/** - * Module initialization - */ -void -bfa_fcs_rport_modinit(struct bfa_fcs_s *fcs) -{ -} - -/** - * Module cleanup - */ -void -bfa_fcs_rport_modexit(struct bfa_fcs_s *fcs) -{ - bfa_fcs_modexit_comp(fcs); -} - /** * Return state of rport. 
*/ diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c index 8d18589e1da2..75d6f058a461 100644 --- a/drivers/scsi/bfa/vport.c +++ b/drivers/scsi/bfa/vport.c @@ -616,23 +616,6 @@ bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport) bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELCOMP); } -/** - * Module initialization - */ -void -bfa_fcs_vport_modinit(struct bfa_fcs_s *fcs) -{ -} - -/** - * Module cleanup - */ -void -bfa_fcs_vport_modexit(struct bfa_fcs_s *fcs) -{ - bfa_fcs_modexit_comp(fcs); -} - u32 bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs) { -- cgit v1.2.3 From a046bf0559018ba3d16c412fc4e1aa2be5f11f36 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Wed, 3 Mar 2010 17:43:45 -0800 Subject: [SCSI] bfa: Fix to allow creation of only 190 vports on CNA. Brocade CNA currently supports only 190 vports (instead of 191), since there are only 192 unicast cam entries reserved for FCoE. Brocade CNA has a total of 256 unicast cam entries (192 FCoE + 64 LL) 192 cam entries = 1 burned in mac + 1 baseport FPMA mac + 190 vport FPMA macs. Made changes to the code to support only 190 vports. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_lps.c | 20 ++++++++++++++++++++ drivers/scsi/bfa/fcs_vport.h | 1 - drivers/scsi/bfa/include/bfa_svc.h | 1 + drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h | 8 -------- drivers/scsi/bfa/lport_api.c | 3 ++- drivers/scsi/bfa/vport.c | 17 +---------------- 6 files changed, 24 insertions(+), 26 deletions(-) diff --git a/drivers/scsi/bfa/bfa_lps.c b/drivers/scsi/bfa/bfa_lps.c index c8c2564af725..66b9b15f4294 100644 --- a/drivers/scsi/bfa/bfa_lps.c +++ b/drivers/scsi/bfa/bfa_lps.c @@ -18,6 +18,7 @@ #include #include #include +#include BFA_TRC_FILE(HAL, LPS); BFA_MODULE(lps); @@ -25,6 +26,12 @@ BFA_MODULE(lps); #define BFA_LPS_MIN_LPORTS (1) #define BFA_LPS_MAX_LPORTS (256) +/* + * Maximum Vports supported per physical port or vf. 
+ */ +#define BFA_LPS_MAX_VPORTS_SUPP_CB 255 +#define BFA_LPS_MAX_VPORTS_SUPP_CT 190 + /** * forward declarations */ @@ -598,6 +605,19 @@ bfa_lps_cvl_event(struct bfa_lps_s *lps) bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); } +u32 +bfa_lps_get_max_vport(struct bfa_s *bfa) +{ + struct bfa_ioc_attr_s ioc_attr; + + bfa_get_attr(bfa, &ioc_attr); + + if (ioc_attr.pci_attr.device_id == BFA_PCI_DEVICE_ID_CT) + return (BFA_LPS_MAX_VPORTS_SUPP_CT); + else + return (BFA_LPS_MAX_VPORTS_SUPP_CB); +} + /** * lps_public BFA LPS public functions */ diff --git a/drivers/scsi/bfa/fcs_vport.h b/drivers/scsi/bfa/fcs_vport.h index 32565ba666eb..13c32ebf946c 100644 --- a/drivers/scsi/bfa/fcs_vport.h +++ b/drivers/scsi/bfa/fcs_vport.h @@ -26,7 +26,6 @@ void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport); void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport); -u32 bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs); #endif /* __FCS_VPORT_H__ */ diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h index 2e4372f66b8b..0d7ed4d963a7 100644 --- a/drivers/scsi/bfa/include/bfa_svc.h +++ b/drivers/scsi/bfa/include/bfa_svc.h @@ -293,6 +293,7 @@ void bfa_uf_free(struct bfa_uf_s *uf); * bfa lport service api */ +u32 bfa_lps_get_max_vport(struct bfa_s *bfa); struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa); void bfa_lps_delete(struct bfa_lps_s *lps); void bfa_lps_discard(struct bfa_lps_s *lps); diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h index 967ceb0eb074..ceaefd3060f4 100644 --- a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h +++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h @@ -34,14 +34,6 @@ struct bfa_fcs_s; struct bfa_fcs_fabric_s; /* -* @todo : need to move to a global config file. - * Maximum Vports supported per physical port or vf. - */ -#define BFA_FCS_MAX_VPORTS_SUPP_CB 255 -#define BFA_FCS_MAX_VPORTS_SUPP_CT 191 - -/* -* @todo : need to move to a global config file. * Maximum Rports supported per port (physical/logical). 
*/ #define BFA_FCS_MAX_RPORTS_SUPP 256 /* @todo : tentative value */ diff --git a/drivers/scsi/bfa/lport_api.c b/drivers/scsi/bfa/lport_api.c index 1e06792cd4c2..4a4ccce99364 100644 --- a/drivers/scsi/bfa/lport_api.c +++ b/drivers/scsi/bfa/lport_api.c @@ -235,7 +235,8 @@ bfa_fcs_port_get_info(struct bfa_fcs_port_s *port, port_info->port_wwn = bfa_fcs_port_get_pwwn(port); port_info->node_wwn = bfa_fcs_port_get_nwwn(port); - port_info->max_vports_supp = bfa_fcs_vport_get_max(port->fcs); + port_info->max_vports_supp = + bfa_lps_get_max_vport(port->fcs->bfa); port_info->num_vports_inuse = bfa_fcs_fabric_vport_count(port->fabric); port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP; diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c index 75d6f058a461..3dce9e1c947d 100644 --- a/drivers/scsi/bfa/vport.c +++ b/drivers/scsi/bfa/vport.c @@ -616,21 +616,6 @@ bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport) bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELCOMP); } -u32 -bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs) -{ - struct bfa_ioc_attr_s ioc_attr; - - bfa_get_attr(fcs->bfa, &ioc_attr); - - if (ioc_attr.pci_attr.device_id == BFA_PCI_DEVICE_ID_CT) - return BFA_FCS_MAX_VPORTS_SUPP_CT; - else - return BFA_FCS_MAX_VPORTS_SUPP_CB; -} - - - /** * fcs_vport_api Virtual port API */ @@ -667,7 +652,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, return BFA_STATUS_VPORT_EXISTS; if (bfa_fcs_fabric_vport_count(&fcs->fabric) == - bfa_fcs_vport_get_max(fcs)) + bfa_lps_get_max_vport(fcs->bfa)) return BFA_STATUS_VPORT_MAX; vport->lps = bfa_lps_alloc(fcs->bfa); -- cgit v1.2.3 From e67143243a1a6b47e1bdcda189ffac46d2a8744d Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Wed, 3 Mar 2010 17:44:02 -0800 Subject: [SCSI] bfa: Resume BFA operations after firmware mismatch is resolved. bfad.c & bfad_drv.h: * Created a kernel thread from pci_probe that does the bfad start operations after BFA init done on a firmware mismatch. * The kernel thread on a fw mismatch waits for an event from IOC call back and is woken up from bfa_cb_init() on BFA init success. * In normal cases of no firmware mismatch this thread is terminated in pci_probe. bfa_fcs_lport.c, fabric.c, fcs_lport.h & vport.c: * Split the lport init to attach time and init time code, so that proper config attributes are set after firmware mismatch. bfa_iocfc.c: * Handle an IOC timer issue, where the IOC timer would expire before the init completion and send Init fail event to the driver, however IOC init continues and completes successfully at the later stage. The bfa and driver were not handling this kind of deferred init completion. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_fcs_lport.c | 30 ++++--- drivers/scsi/bfa/bfa_iocfc.c | 6 +- drivers/scsi/bfa/bfad.c | 189 ++++++++++++++++++++++++++++++--------- drivers/scsi/bfa/bfad_drv.h | 12 ++- drivers/scsi/bfa/fabric.c | 4 +- drivers/scsi/bfa/fcs_lport.h | 7 +- drivers/scsi/bfa/vport.c | 3 +- 7 files changed, 192 insertions(+), 59 deletions(-) diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index c7ab257f10a7..3d62e456950b 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -873,36 +873,46 @@ bfa_fcs_port_is_online(struct bfa_fcs_port_s *port) } /** - * Logical port initialization of base or virtual port. - * Called by fabric for base port or by vport for virtual ports. + * Attach time initialization of logical ports. 
*/ void -bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, - u16 vf_id, struct bfa_port_cfg_s *port_cfg, - struct bfa_fcs_vport_s *vport) +bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, + uint16_t vf_id, struct bfa_fcs_vport_s *vport) { lport->fcs = fcs; lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id); - bfa_os_assign(lport->port_cfg, *port_cfg); lport->vport = vport; lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) : bfa_lps_get_tag(lport->fabric->lps); INIT_LIST_HEAD(&lport->rport_q); lport->num_rports = 0; +} + +/** + * Logical port initialization of base or virtual port. + * Called by fabric for base port or by vport for virtual ports. + */ - lport->bfad_port = - bfa_fcb_port_new(fcs->bfad, lport, lport->port_cfg.roles, +void +bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, + struct bfa_port_cfg_s *port_cfg) +{ + struct bfa_fcs_vport_s *vport = lport->vport; + + bfa_os_assign(lport->port_cfg, *port_cfg); + + lport->bfad_port = bfa_fcb_port_new(lport->fcs->bfad, lport, + lport->port_cfg.roles, lport->fabric->vf_drv, vport ? vport->vport_drv : NULL); + bfa_fcs_port_aen_post(lport, BFA_LPORT_AEN_NEW); bfa_sm_set_state(lport, bfa_fcs_port_sm_uninit); bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); } - - /** * fcs_lport_api */ diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c index b5e7224bda4a..137591c984d7 100644 --- a/drivers/scsi/bfa/bfa_iocfc.c +++ b/drivers/scsi/bfa/bfa_iocfc.c @@ -336,8 +336,10 @@ bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete) bfa_cb_init(bfa->bfad, BFA_STATUS_OK); else bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED); - } else - bfa->iocfc.action = BFA_IOCFC_ACT_NONE; + } else { + if (bfa->iocfc.cfgdone) + bfa->iocfc.action = BFA_IOCFC_ACT_NONE; + } } static void diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 965dfb575e5a..4ccaeaecd14c 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -20,6 +20,7 @@ */ #include +#include #include "bfad_drv.h" #include "bfad_im.h" #include "bfad_tm.h" @@ -97,6 +98,8 @@ bfad_fc4_probe(struct bfad_s *bfad) if (ipfc_enable) bfad_ipfc_probe(bfad); + + bfad->bfad_flags |= BFAD_FC4_PROBE_DONE; ext: return rc; } @@ -108,6 +111,7 @@ bfad_fc4_probe_undo(struct bfad_s *bfad) bfad_tm_probe_undo(bfad); if (ipfc_enable) bfad_ipfc_probe_undo(bfad); + bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; } static void @@ -175,9 +179,19 @@ bfa_cb_init(void *drv, bfa_status_t init_status) { struct bfad_s *bfad = drv; - if (init_status == BFA_STATUS_OK) + if (init_status == BFA_STATUS_OK) { bfad->bfad_flags |= BFAD_HAL_INIT_DONE; + /* If BFAD_HAL_INIT_FAIL flag is set: + * Wake up the kernel thread to start + * the bfad operations after HAL init done + */ + if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) { + bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL; + wake_up_process(bfad->bfad_tsk); + } + } + complete(&bfad->comp); } @@ -749,7 +763,13 @@ bfad_drv_init(struct bfad_s *bfad) bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod); bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen); bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); - bfa_fcs_init(&bfad->bfa_fcs); + + /* Do FCS init only when HAL init is done */ + if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { + bfa_fcs_init(&bfad->bfa_fcs); + bfad->bfad_flags |= BFAD_FCS_INIT_DONE; + } + bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable); spin_unlock_irqrestore(&bfad->bfad_lock, flags); @@ -767,12 +787,22 @@ out_hal_mem_alloc_failure: void 
bfad_drv_uninit(struct bfad_s *bfad) { + unsigned long flags; + + spin_lock_irqsave(&bfad->bfad_lock, flags); + init_completion(&bfad->comp); + bfa_stop(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + wait_for_completion(&bfad->comp); + del_timer_sync(&bfad->hal_tmo); bfa_isr_disable(&bfad->bfa); bfa_detach(&bfad->bfa); bfad_remove_intr(bfad); bfa_assert(list_empty(&bfad->file_q)); bfad_hal_mem_release(bfad); + + bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE; } void @@ -863,6 +893,86 @@ bfad_drv_log_level_set(struct bfad_s *bfad) bfa_log_set_level_all(&bfad->log_data, log_level); } +bfa_status_t +bfad_start_ops(struct bfad_s *bfad) +{ + int retval; + + /* PPORT FCS config */ + bfad_fcs_port_cfg(bfad); + + retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM); + if (retval != BFA_STATUS_OK) + goto out_cfg_pport_failure; + + /* BFAD level FC4 (IM/TM/IPFC) specific resource allocation */ + retval = bfad_fc4_probe(bfad); + if (retval != BFA_STATUS_OK) { + printk(KERN_WARNING "bfad_fc4_probe failed\n"); + goto out_fc4_probe_failure; + } + + bfad_drv_start(bfad); + + /* + * If bfa_linkup_delay is set to -1 default; try to retrive the + * value using the bfad_os_get_linkup_delay(); else use the + * passed in module param value as the bfa_linkup_delay. + */ + if (bfa_linkup_delay < 0) { + + bfa_linkup_delay = bfad_os_get_linkup_delay(bfad); + bfad_os_rport_online_wait(bfad); + bfa_linkup_delay = -1; + + } else { + bfad_os_rport_online_wait(bfad); + } + + bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name); + + return BFA_STATUS_OK; + +out_fc4_probe_failure: + bfad_fc4_probe_undo(bfad); + bfad_uncfg_pport(bfad); +out_cfg_pport_failure: + return BFA_STATUS_FAILED; +} + +int +bfad_worker (void *ptr) +{ + struct bfad_s *bfad; + unsigned long flags; + + bfad = (struct bfad_s *)ptr; + + while (!kthread_should_stop()) { + + /* Check if the FCS init is done from bfad_drv_init; + * if not done do FCS init and set the flag. 
+ */ + if (!(bfad->bfad_flags & BFAD_FCS_INIT_DONE)) { + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfa_fcs_init(&bfad->bfa_fcs); + bfad->bfad_flags |= BFAD_FCS_INIT_DONE; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + } + + /* Start the bfad operations after HAL init done */ + bfad_start_ops(bfad); + + spin_lock_irqsave(&bfad->bfad_lock, flags); + bfad->bfad_tsk = NULL; + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + + break; + } + + return 0; +} + /* * PCI_entry PCI driver entries * { */ @@ -937,57 +1047,39 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) bfad->ref_count = 0; bfad->pport.bfad = bfad; + bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s", + "bfad_worker"); + if (IS_ERR(bfad->bfad_tsk)) { + printk(KERN_INFO "bfad[%d]: Kernel thread" + " creation failed!\n", + bfad->inst_no); + goto out_kthread_create_failure; + } + retval = bfad_drv_init(bfad); if (retval != BFA_STATUS_OK) goto out_drv_init_failure; if (!(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { + bfad->bfad_flags |= BFAD_HAL_INIT_FAIL; printk(KERN_WARNING "bfad%d: hal init failed\n", bfad->inst_no); goto ok; } - /* - * PPORT FCS config - */ - bfad_fcs_port_cfg(bfad); - - retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM); + retval = bfad_start_ops(bfad); if (retval != BFA_STATUS_OK) - goto out_cfg_pport_failure; + goto out_start_ops_failure; - /* - * BFAD level FC4 (IM/TM/IPFC) specific resource allocation - */ - retval = bfad_fc4_probe(bfad); - if (retval != BFA_STATUS_OK) { - printk(KERN_WARNING "bfad_fc4_probe failed\n"); - goto out_fc4_probe_failure; - } - - bfad_drv_start(bfad); + kthread_stop(bfad->bfad_tsk); + bfad->bfad_tsk = NULL; - /* - * If bfa_linkup_delay is set to -1 default; try to retrive the - * value using the bfad_os_get_linkup_delay(); else use the - * passed in module param value as the bfa_linkup_delay. - */ - if (bfa_linkup_delay < 0) { - bfa_linkup_delay = bfad_os_get_linkup_delay(bfad); - bfad_os_rport_online_wait(bfad); - bfa_linkup_delay = -1; - } else { - bfad_os_rport_online_wait(bfad); - } - - bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name); ok: return 0; -out_fc4_probe_failure: - bfad_fc4_probe_undo(bfad); - bfad_uncfg_pport(bfad); -out_cfg_pport_failure: +out_start_ops_failure: bfad_drv_uninit(bfad); out_drv_init_failure: + kthread_stop(bfad->bfad_tsk); +out_kthread_create_failure: mutex_lock(&bfad_mutex); bfad_inst--; list_del(&bfad->list_entry); @@ -1012,6 +1104,11 @@ bfad_pci_remove(struct pci_dev *pdev) bfa_trc(bfad, bfad->inst_no); + spin_lock_irqsave(&bfad->bfad_lock, flags); + if (bfad->bfad_tsk != NULL) + kthread_stop(bfad->bfad_tsk); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + if ((bfad->bfad_flags & BFAD_DRV_INIT_DONE) && !(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) { @@ -1028,13 +1125,25 @@ bfad_pci_remove(struct pci_dev *pdev) goto remove_sysfs; } - if (bfad->bfad_flags & BFAD_HAL_START_DONE) + if (bfad->bfad_flags & BFAD_HAL_START_DONE) { bfad_drv_stop(bfad); + } else if (bfad->bfad_flags & BFAD_DRV_INIT_DONE) { + /* Invoking bfa_stop() before bfa_detach + * when HAL and DRV init are success + * but HAL start did not occur. 
+ */ + spin_lock_irqsave(&bfad->bfad_lock, flags); + init_completion(&bfad->comp); + bfa_stop(&bfad->bfa); + spin_unlock_irqrestore(&bfad->bfad_lock, flags); + wait_for_completion(&bfad->comp); + } bfad_remove_intr(bfad); - del_timer_sync(&bfad->hal_tmo); - bfad_fc4_probe_undo(bfad); + + if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) + bfad_fc4_probe_undo(bfad); if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) bfad_uncfg_pport(bfad); diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index 172c81e25c1c..9fa801a50250 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -62,7 +62,9 @@ #define BFAD_HAL_START_DONE 0x00000010 #define BFAD_PORT_ONLINE 0x00000020 #define BFAD_RPORT_ONLINE 0x00000040 - +#define BFAD_FCS_INIT_DONE 0x00000080 +#define BFAD_HAL_INIT_FAIL 0x00000100 +#define BFAD_FC4_PROBE_DONE 0x00000200 #define BFAD_PORT_DELETE 0x00000001 /* @@ -168,6 +170,7 @@ struct bfad_s { u32 inst_no; /* BFAD instance number */ u32 bfad_flags; spinlock_t bfad_lock; + struct task_struct *bfad_tsk; struct bfad_cfg_param_s cfg_data; struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY]; int nvec; @@ -258,6 +261,7 @@ bfa_status_t bfad_vf_create(struct bfad_s *bfad, u16 vf_id, struct bfa_port_cfg_s *port_cfg); bfa_status_t bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role); bfa_status_t bfad_drv_init(struct bfad_s *bfad); +bfa_status_t bfad_start_ops(struct bfad_s *bfad); void bfad_drv_start(struct bfad_s *bfad); void bfad_uncfg_pport(struct bfad_s *bfad); void bfad_drv_stop(struct bfad_s *bfad); @@ -280,6 +284,12 @@ void bfad_drv_log_level_set(struct bfad_s *bfad); bfa_status_t bfad_fc4_module_init(void); void bfad_fc4_module_exit(void); +bfa_status_t bfad_os_kthread_create(struct bfad_s *bfad); +void bfad_os_kthread_stop(struct bfad_s *bfad); +void bfad_os_kthread_wakeup(struct bfad_s *bfad); +int bfad_os_kthread_should_stop(void); +int bfad_worker (void *ptr); + void bfad_pci_remove(struct pci_dev *pdev); int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid); void bfad_os_rport_online_wait(struct bfad_s *bfad); diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c index e22989886646..20a686a420a2 100644 --- a/drivers/scsi/bfa/fabric.c +++ b/drivers/scsi/bfa/fabric.c @@ -136,8 +136,7 @@ bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, case BFA_FCS_FABRIC_SM_CREATE: bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created); bfa_fcs_fabric_init(fabric); - bfa_fcs_lport_init(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, - &fabric->bport.port_cfg, NULL); + bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg); break; case BFA_FCS_FABRIC_SM_LINK_UP: @@ -841,6 +840,7 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs) bfa_wc_up(&fabric->wc); /* For the base port */ bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); + bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL); } void diff --git a/drivers/scsi/bfa/fcs_lport.h b/drivers/scsi/bfa/fcs_lport.h index ae744ba35671..a6508c8ab184 100644 --- a/drivers/scsi/bfa/fcs_lport.h +++ b/drivers/scsi/bfa/fcs_lport.h @@ -84,9 +84,10 @@ void bfa_fcs_port_uf_recv(struct bfa_fcs_port_s *lport, struct fchs_s *fchs, * Following routines will be called by Fabric to indicate port * online/offline to vport. 
*/ -void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, - u16 vf_id, struct bfa_port_cfg_s *port_cfg, - struct bfa_fcs_vport_s *vport); +void bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs, + uint16_t vf_id, struct bfa_fcs_vport_s *vport); +void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, + struct bfa_port_cfg_s *port_cfg); void bfa_fcs_port_online(struct bfa_fcs_port_s *port); void bfa_fcs_port_offline(struct bfa_fcs_port_s *port); void bfa_fcs_port_delete(struct bfa_fcs_port_s *port); diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c index 3dce9e1c947d..13f737139379 100644 --- a/drivers/scsi/bfa/vport.c +++ b/drivers/scsi/bfa/vport.c @@ -662,7 +662,8 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, vport->vport_drv = vport_drv; bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit); - bfa_fcs_lport_init(&vport->lport, fcs, vf_id, vport_cfg, vport); + bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport); + bfa_fcs_lport_init(&vport->lport, vport_cfg); bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE); -- cgit v1.2.3 From 422d2cb8f9afadba1ecd3614f658b6daaaa480fb Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Fri, 26 Feb 2010 15:32:31 -0800 Subject: ceph: reset osd after relevant messages timed out This simplifies the process of timing out messages. We keep lru of current messages that are in flight. If a timeout has passed, we reset the osd connection, so that messages will be retransmitted. This is a failsafe in case we hit some sort of problem sending out message to the OSD. Normally, we'll get notification via an updated osdmap if there are problems. If a request is older than the keepalive timeout, send a keepalive to ensure we detect any breaks in the TCP connection. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/osd_client.c | 153 +++++++++++++++++++++++++++++++++------------------ fs/ceph/osd_client.h | 6 +- fs/ceph/super.c | 8 ++- fs/ceph/super.h | 3 + 4 files changed, 113 insertions(+), 57 deletions(-) diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c index c4763bff97b4..dbe63db9762f 100644 --- a/fs/ceph/osd_client.c +++ b/fs/ceph/osd_client.c @@ -17,6 +17,8 @@ #define OSD_OPREPLY_FRONT_LEN 512 const static struct ceph_connection_operations osd_con_ops; +static int __kick_requests(struct ceph_osd_client *osdc, + struct ceph_osd *kickosd); static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd); @@ -339,6 +341,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc) osd->o_con.ops = &osd_con_ops; osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD; + INIT_LIST_HEAD(&osd->o_keepalive_item); return osd; } @@ -461,6 +464,16 @@ static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o) return NULL; } +static void __schedule_osd_timeout(struct ceph_osd_client *osdc) +{ + schedule_delayed_work(&osdc->timeout_work, + osdc->client->mount_args->osd_keepalive_timeout * HZ); +} + +static void __cancel_osd_timeout(struct ceph_osd_client *osdc) +{ + cancel_delayed_work(&osdc->timeout_work); +} /* * Register request, assign tid. 
If this is the first request, set up @@ -472,21 +485,16 @@ static void register_request(struct ceph_osd_client *osdc, mutex_lock(&osdc->request_mutex); req->r_tid = ++osdc->last_tid; req->r_request->hdr.tid = cpu_to_le64(req->r_tid); + INIT_LIST_HEAD(&req->r_req_lru_item); dout("register_request %p tid %lld\n", req, req->r_tid); __insert_request(osdc, req); ceph_osdc_get_request(req); osdc->num_requests++; - req->r_timeout_stamp = - jiffies + osdc->client->mount_args->osd_timeout*HZ; - if (osdc->num_requests == 1) { - osdc->timeout_tid = req->r_tid; - dout(" timeout on tid %llu at %lu\n", req->r_tid, - req->r_timeout_stamp); - schedule_delayed_work(&osdc->timeout_work, - round_jiffies_relative(req->r_timeout_stamp - jiffies)); + dout(" first request, scheduling timeout\n"); + __schedule_osd_timeout(osdc); } mutex_unlock(&osdc->request_mutex); } @@ -513,21 +521,10 @@ static void __unregister_request(struct ceph_osd_client *osdc, ceph_osdc_put_request(req); - if (req->r_tid == osdc->timeout_tid) { - if (osdc->num_requests == 0) { - dout("no requests, canceling timeout\n"); - osdc->timeout_tid = 0; - cancel_delayed_work(&osdc->timeout_work); - } else { - req = rb_entry(rb_first(&osdc->requests), - struct ceph_osd_request, r_node); - osdc->timeout_tid = req->r_tid; - dout("rescheduled timeout on tid %llu at %lu\n", - req->r_tid, req->r_timeout_stamp); - schedule_delayed_work(&osdc->timeout_work, - round_jiffies_relative(req->r_timeout_stamp - - jiffies)); - } + list_del_init(&req->r_req_lru_item); + if (osdc->num_requests == 0) { + dout(" no requests, canceling timeout\n"); + __cancel_osd_timeout(osdc); } } @@ -540,6 +537,7 @@ static void __cancel_request(struct ceph_osd_request *req) ceph_con_revoke(&req->r_osd->o_con, req->r_request); req->r_sent = 0; } + list_del_init(&req->r_req_lru_item); } /* @@ -635,7 +633,8 @@ static int __send_request(struct ceph_osd_client *osdc, reqhead->flags |= cpu_to_le32(req->r_flags); /* e.g., RETRY */ reqhead->reassert_version = req->r_reassert_version; - req->r_timeout_stamp = jiffies+osdc->client->mount_args->osd_timeout*HZ; + req->r_sent_stamp = jiffies; + list_move_tail(&osdc->req_lru, &req->r_req_lru_item); ceph_msg_get(req->r_request); /* send consumes a ref */ ceph_con_send(&req->r_osd->o_con, req->r_request); @@ -656,11 +655,14 @@ static void handle_timeout(struct work_struct *work) { struct ceph_osd_client *osdc = container_of(work, struct ceph_osd_client, timeout_work.work); - struct ceph_osd_request *req; + struct ceph_osd_request *req, *last_req = NULL; struct ceph_osd *osd; unsigned long timeout = osdc->client->mount_args->osd_timeout * HZ; - unsigned long next_timeout = timeout + jiffies; + unsigned long keepalive = + osdc->client->mount_args->osd_keepalive_timeout * HZ; + unsigned long last_sent = 0; struct rb_node *p; + struct list_head slow_osds; dout("timeout\n"); down_read(&osdc->map_sem); @@ -683,25 +685,56 @@ static void handle_timeout(struct work_struct *work) continue; } } - for (p = rb_first(&osdc->osds); p; p = rb_next(p)) { - osd = rb_entry(p, struct ceph_osd, o_node); - if (list_empty(&osd->o_requests)) - continue; - req = list_first_entry(&osd->o_requests, - struct ceph_osd_request, r_osd_item); - if (time_before(jiffies, req->r_timeout_stamp)) - continue; - dout(" tid %llu (at least) timed out on osd%d\n", + /* + * reset osds that appear to be _really_ unresponsive. this + * is a failsafe measure.. we really shouldn't be getting to + * this point if the system is working properly. 
the monitors + * should mark the osd as failed and we should find out about + * it from an updated osd map. + */ + while (!list_empty(&osdc->req_lru)) { + req = list_entry(osdc->req_lru.next, struct ceph_osd_request, + r_req_lru_item); + + if (time_before(jiffies, req->r_sent_stamp + timeout)) + break; + + BUG_ON(req == last_req && req->r_sent_stamp == last_sent); + last_req = req; + last_sent = req->r_sent_stamp; + + osd = req->r_osd; + BUG_ON(!osd); + pr_warning(" tid %llu timed out on osd%d, will reset osd\n", + req->r_tid, osd->o_osd); + __kick_requests(osdc, osd); + } + + /* + * ping osds that are a bit slow. this ensures that if there + * is a break in the TCP connection we will notice, and reopen + * a connection with that osd (from the fault callback). + */ + INIT_LIST_HEAD(&slow_osds); + list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) { + if (time_before(jiffies, req->r_sent_stamp + keepalive)) + break; + + osd = req->r_osd; + BUG_ON(!osd); + dout(" tid %llu is slow, will send keepalive on osd%d\n", req->r_tid, osd->o_osd); - req->r_timeout_stamp = next_timeout; + list_move_tail(&osd->o_keepalive_item, &slow_osds); + } + while (!list_empty(&slow_osds)) { + osd = list_entry(slow_osds.next, struct ceph_osd, + o_keepalive_item); + list_del_init(&osd->o_keepalive_item); ceph_con_keepalive(&osd->o_con); } - if (osdc->timeout_tid) - schedule_delayed_work(&osdc->timeout_work, - round_jiffies_relative(timeout)); - + __schedule_osd_timeout(osdc); mutex_unlock(&osdc->request_mutex); up_read(&osdc->map_sem); @@ -819,18 +852,7 @@ bad: } -/* - * Resubmit osd requests whose osd or osd address has changed. Request - * a new osd map if osds are down, or we are otherwise unable to determine - * how to direct a request. - * - * Close connections to down osds. - * - * If @who is specified, resubmit requests for that specific osd. - * - * Caller should hold map_sem for read and request_mutex. - */ -static void kick_requests(struct ceph_osd_client *osdc, +static int __kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *kickosd) { struct ceph_osd_request *req; @@ -839,7 +861,6 @@ static void kick_requests(struct ceph_osd_client *osdc, int err; dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1); - mutex_lock(&osdc->request_mutex); if (kickosd) { __reset_osd(osdc, kickosd); } else { @@ -900,14 +921,36 @@ kick: req->r_resend = true; } } + + return needmap; +} + +/* + * Resubmit osd requests whose osd or osd address has changed. Request + * a new osd map if osds are down, or we are otherwise unable to determine + * how to direct a request. + * + * Close connections to down osds. + * + * If @who is specified, resubmit requests for that specific osd. + * + * Caller should hold map_sem for read and request_mutex. + */ +static void kick_requests(struct ceph_osd_client *osdc, + struct ceph_osd *kickosd) +{ + int needmap; + + mutex_lock(&osdc->request_mutex); + needmap = __kick_requests(osdc, kickosd); mutex_unlock(&osdc->request_mutex); if (needmap) { dout("%d requests for down osds, need new map\n", needmap); ceph_monc_request_next_osdmap(&osdc->client->monc); } -} +} /* * Process updated osd map. 
* @@ -1164,11 +1207,11 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) init_completion(&osdc->map_waiters); osdc->last_requested_map = 0; mutex_init(&osdc->request_mutex); - osdc->timeout_tid = 0; osdc->last_tid = 0; osdc->osds = RB_ROOT; INIT_LIST_HEAD(&osdc->osd_lru); osdc->requests = RB_ROOT; + INIT_LIST_HEAD(&osdc->req_lru); osdc->num_requests = 0; INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout); INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout); diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h index f256eba6fe7a..1b1a3ca43afc 100644 --- a/fs/ceph/osd_client.h +++ b/fs/ceph/osd_client.h @@ -36,12 +36,15 @@ struct ceph_osd { void *o_authorizer_buf, *o_authorizer_reply_buf; size_t o_authorizer_buf_len, o_authorizer_reply_buf_len; unsigned long lru_ttl; + int o_marked_for_keepalive; + struct list_head o_keepalive_item; }; /* an in-flight request */ struct ceph_osd_request { u64 r_tid; /* unique for this client */ struct rb_node r_node; + struct list_head r_req_lru_item; struct list_head r_osd_item; struct ceph_osd *r_osd; struct ceph_pg r_pgid; @@ -67,7 +70,7 @@ struct ceph_osd_request { char r_oid[40]; /* object name */ int r_oid_len; - unsigned long r_timeout_stamp; + unsigned long r_sent_stamp; bool r_resend; /* msg send failed, needs retry */ struct ceph_file_layout r_file_layout; @@ -92,6 +95,7 @@ struct ceph_osd_client { u64 timeout_tid; /* tid of timeout triggering rq */ u64 last_tid; /* tid of last request */ struct rb_root requests; /* pending requests */ + struct list_head req_lru; /* pending requests lru */ int num_requests; struct delayed_work timeout_work; struct delayed_work osds_timeout_work; diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 74953be75f8f..4290a6e860b0 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -292,6 +292,7 @@ enum { Opt_wsize, Opt_rsize, Opt_osdtimeout, + Opt_osdkeepalivetimeout, Opt_mount_timeout, Opt_osd_idle_ttl, Opt_caps_wanted_delay_min, @@ -322,6 +323,7 @@ static match_table_t arg_tokens = { {Opt_wsize, "wsize=%d"}, {Opt_rsize, "rsize=%d"}, {Opt_osdtimeout, "osdtimeout=%d"}, + {Opt_osdkeepalivetimeout, "osdkeepalive=%d"}, {Opt_mount_timeout, "mount_timeout=%d"}, {Opt_osd_idle_ttl, "osd_idle_ttl=%d"}, {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"}, @@ -367,7 +369,8 @@ static struct ceph_mount_args *parse_mount_args(int flags, char *options, /* start with defaults */ args->sb_flags = flags; args->flags = CEPH_OPT_DEFAULT; - args->osd_timeout = 5; /* seconds */ + args->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT; + args->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; args->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */ args->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */ args->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT; @@ -468,6 +471,9 @@ static struct ceph_mount_args *parse_mount_args(int flags, char *options, case Opt_osdtimeout: args->osd_timeout = intval; break; + case Opt_osdkeepalivetimeout: + args->osd_keepalive_timeout = intval; + break; case Opt_mount_timeout: args->mount_timeout = intval; break; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 6a778f2c3f6e..02c0ddcf3eaf 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -62,6 +62,7 @@ struct ceph_mount_args { int max_readdir; /* max readdir size */ int congestion_kb; /* max readdir size */ int osd_timeout; + int osd_keepalive_timeout; char *snapdir_name; /* default ".snap" */ char *name; char *secret; @@ -72,6 +73,8 @@ struct ceph_mount_args { * defaults */ #define 
CEPH_MOUNT_TIMEOUT_DEFAULT 60 +#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */ +#define CEPH_OSD_KEEPALIVE_DEFAULT 5 #define CEPH_OSD_IDLE_TTL_DEFAULT 60 #define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */ -- cgit v1.2.3 From f1a3d57213fe264b4cf584e78bac36aaf9998729 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 18 Jan 2010 11:53:08 +1100 Subject: ceph: update for write_inode API change Signed-off-by: Stephen Rothwell Signed-off-by: Sage Weil --- fs/ceph/caps.c | 4 +++- fs/ceph/super.h | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 8b89b9123252..db122bb357b8 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -5,6 +5,7 @@ #include #include #include +#include #include "super.h" #include "decode.h" @@ -1801,12 +1802,13 @@ int ceph_fsync(struct file *file, struct dentry *dentry, int datasync) * get by with fewer MDS messages if we wait for data writeback to * complete first. */ -int ceph_write_inode(struct inode *inode, int wait) +int ceph_write_inode(struct inode *inode, struct writeback_control *wbc) { struct ceph_inode_info *ci = ceph_inode(inode); unsigned flush_tid; int err = 0; int dirty; + int wait = wbc->sync_mode == WB_SYNC_ALL; dout("write_inode %p wait=%d\n", inode, wait); if (wait) { diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 02c0ddcf3eaf..65d12036b670 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -11,6 +11,7 @@ #include #include #include +#include #include "types.h" #include "messenger.h" @@ -811,7 +812,7 @@ static inline void ceph_remove_cap(struct ceph_cap *cap) extern void ceph_put_cap(struct ceph_cap *cap); extern void ceph_queue_caps_release(struct inode *inode); -extern int ceph_write_inode(struct inode *inode, int unused); +extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc); extern int ceph_fsync(struct file *file, struct dentry *dentry, int datasync); extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, struct ceph_mds_session *session); -- cgit v1.2.3 From df2cf170c823ba779ca339e3ede347c87f4dc6a9 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 12 Feb 2010 07:44:16 -0500 Subject: cifs: overhaul cifs_revalidate and rename to cifs_revalidate_dentry cifs_revalidate is renamed to cifs_revalidate_dentry as a later patch will add a by-filehandle variant. Add a new "invalid_mapping" flag to the cifsInodeInfo that indicates that the pagecache is considered invalid. Add a new routine to check inode attributes whenever they're updated and set that flag if the inode has changed on the server. cifs_revalidate_dentry is then changed to just update the attrcache if needed and then to zap the pagecache if it's not valid. There are some other behavior changes in here as well. Open files are now allowed to have their caches invalidated. I see no reason why we'd want to keep stale data around just because a file is open. Also, cifs_revalidate_cache uses the server_eof for revalidating the file size since that should more closely match the size of the file on the server. 
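The core of the revalidation logic described above is: compare the attributes just fetched from
the server against the cached inode, and only a change in mtime or size marks the page cache for
invalidation; the actual zap happens later, outside the attribute update, and an oplock short-
circuits the check entirely. A simplified, hypothetical sketch of that check follows; the types
and field names are stand-ins, not the cifs structures.

    /* Illustrative sketch of "tag the cache invalid if mtime or size
     * changed on the server".  Not the cifs implementation. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    struct cached_inode {
        struct timespec mtime;
        long long size;
        bool oplocked;          /* server promised exclusivity */
        bool invalid_mapping;   /* pagecache needs to be zapped */
    };

    struct server_attrs {
        struct timespec mtime;
        long long size;
    };

    static bool ts_equal(const struct timespec *a, const struct timespec *b)
    {
        return a->tv_sec == b->tv_sec && a->tv_nsec == b->tv_nsec;
    }

    static void revalidate_cache(struct cached_inode *ci,
                                 const struct server_attrs *fattr)
    {
        if (ci->oplocked)       /* oplock: cached data is trusted as-is */
            return;
        if (ts_equal(&ci->mtime, &fattr->mtime) && ci->size == fattr->size)
            return;             /* unchanged on the server */
        ci->invalid_mapping = true;   /* zap the pagecache later */
    }

    int main(void)
    {
        struct cached_inode ci = { .mtime = { .tv_sec = 100 }, .size = 4096 };
        struct server_attrs fattr = { .mtime = { .tv_sec = 200 }, .size = 8192 };

        revalidate_cache(&ci, &fattr);
        printf("invalid_mapping=%d\n", ci.invalid_mapping);
        return 0;
    }

Deferring the invalidation to a separate step keeps the attribute update cheap and lets open
files have their stale cached pages dropped as well.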
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 3 +- fs/cifs/cifsfs.h | 2 +- fs/cifs/cifsglob.h | 1 + fs/cifs/dir.c | 2 +- fs/cifs/file.c | 2 +- fs/cifs/inode.c | 211 +++++++++++++++++++++++++---------------------------- 6 files changed, 104 insertions(+), 117 deletions(-) diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 8c6a03627176..cf85a4165b01 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -312,6 +312,7 @@ cifs_alloc_inode(struct super_block *sb) cifs_inode->clientCanCacheRead = false; cifs_inode->clientCanCacheAll = false; cifs_inode->delete_pending = false; + cifs_inode->invalid_mapping = false; cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */ cifs_inode->server_eof = 0; @@ -638,7 +639,7 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin) setting the revalidate time to zero */ CIFS_I(file->f_path.dentry->d_inode)->time = 0; - retval = cifs_revalidate(file->f_path.dentry); + retval = cifs_revalidate_dentry(file->f_path.dentry); if (retval < 0) return (loff_t)retval; } diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 78c1b86d55f6..2af995ca8955 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -61,7 +61,7 @@ extern int cifs_mkdir(struct inode *, struct dentry *, int); extern int cifs_rmdir(struct inode *, struct dentry *); extern int cifs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); -extern int cifs_revalidate(struct dentry *); +extern int cifs_revalidate_dentry(struct dentry *); extern int cifs_getattr(struct vfsmount *, struct dentry *, struct kstat *); extern int cifs_setattr(struct dentry *, struct iattr *); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index a1c817eb291a..63c89d1d70b5 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -389,6 +389,7 @@ struct cifsInodeInfo { bool clientCanCacheRead:1; /* read oplock */ bool clientCanCacheAll:1; /* read and writebehind oplock */ bool delete_pending:1; /* DELETE_ON_CLOSE is set */ + bool invalid_mapping:1; /* pagecache is invalid */ u64 server_eof; /* current file size on server */ u64 uniqueid; /* server inode number */ struct inode vfs_inode; diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 6ccf7262d1b7..e9f7ecc2714b 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -739,7 +739,7 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd) int isValid = 1; if (direntry->d_inode) { - if (cifs_revalidate(direntry)) + if (cifs_revalidate_dentry(direntry)) return 0; } else { cFYI(1, ("neg dentry 0x%p name = %s", diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 3d8f8a96f5a3..b90f8f2ca85c 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1894,7 +1894,7 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) int rc, xid; xid = GetXid(); - rc = cifs_revalidate(dentry); + rc = cifs_revalidate_dentry(dentry); if (rc) { cFYI(1, ("Validation prior to mmap failed, error=%d", rc)); FreeXid(xid); diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 8bdbc818164c..f050dba920cb 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -77,6 +77,41 @@ static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral) } } +/* check inode attributes against fattr. 
If they don't match, tag the + * inode for cache invalidation + */ +static void +cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr) +{ + struct cifsInodeInfo *cifs_i = CIFS_I(inode); + + cFYI(1, ("%s: revalidating inode %llu", __func__, cifs_i->uniqueid)); + + if (inode->i_state & I_NEW) { + cFYI(1, ("%s: inode %llu is new", __func__, cifs_i->uniqueid)); + return; + } + + /* don't bother with revalidation if we have an oplock */ + if (cifs_i->clientCanCacheRead) { + cFYI(1, ("%s: inode %llu is oplocked", __func__, + cifs_i->uniqueid)); + return; + } + + /* revalidate if mtime or size have changed */ + if (timespec_equal(&inode->i_mtime, &fattr->cf_mtime) && + cifs_i->server_eof == fattr->cf_eof) { + cFYI(1, ("%s: inode %llu is unchanged", __func__, + cifs_i->uniqueid)); + return; + } + + cFYI(1, ("%s: invalidating inode %llu mapping", __func__, + cifs_i->uniqueid)); + cifs_i->invalid_mapping = true; +} + /* populate an inode with info from a cifs_fattr struct */ void cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) @@ -85,6 +120,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); unsigned long oldtime = cifs_i->time; + cifs_revalidate_cache(inode, fattr); + inode->i_atime = fattr->cf_atime; inode->i_mtime = fattr->cf_mtime; inode->i_ctime = fattr->cf_ctime; @@ -1389,135 +1426,83 @@ cifs_rename_exit: return rc; } -int cifs_revalidate(struct dentry *direntry) +static bool +cifs_inode_needs_reval(struct inode *inode) { - int xid; - int rc = 0, wbrc = 0; - char *full_path; - struct cifs_sb_info *cifs_sb; - struct cifsInodeInfo *cifsInode; - loff_t local_size; - struct timespec local_mtime; - bool invalidate_inode = false; + struct cifsInodeInfo *cifs_i = CIFS_I(inode); - if (direntry->d_inode == NULL) - return -ENOENT; + if (cifs_i->clientCanCacheRead) + return false; - cifsInode = CIFS_I(direntry->d_inode); + if (!lookupCacheEnabled) + return true; - if (cifsInode == NULL) - return -ENOENT; + if (cifs_i->time == 0) + return true; - /* no sense revalidating inode info on file that no one can write */ - if (CIFS_I(direntry->d_inode)->clientCanCacheRead) - return rc; + /* FIXME: the actimeo should be tunable */ + if (time_after_eq(jiffies, cifs_i->time + HZ)) + return true; + + return false; +} + +/* check invalid_mapping flag and zap the cache if it's set */ +static void +cifs_invalidate_mapping(struct inode *inode) +{ + int rc; + struct cifsInodeInfo *cifs_i = CIFS_I(inode); + + cifs_i->invalid_mapping = false; + + /* write back any cached data */ + if (inode->i_mapping && inode->i_mapping->nrpages != 0) { + rc = filemap_write_and_wait(inode->i_mapping); + if (rc) + cifs_i->write_behind_rc = rc; + } + invalidate_remote_inode(inode); +} + +/* revalidate a dentry's inode attributes */ +int cifs_revalidate_dentry(struct dentry *dentry) +{ + int xid; + int rc = 0; + char *full_path = NULL; + struct inode *inode = dentry->d_inode; + struct super_block *sb = dentry->d_sb; + + if (inode == NULL) + return -ENOENT; xid = GetXid(); - cifs_sb = CIFS_SB(direntry->d_sb); + if (!cifs_inode_needs_reval(inode)) + goto check_inval; /* can not safely grab the rename sem here if rename calls revalidate since that would deadlock */ - full_path = build_path_from_dentry(direntry); + full_path = build_path_from_dentry(dentry); if (full_path == NULL) { rc = -ENOMEM; - FreeXid(xid); - return rc; + goto check_inval; } - cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld " - "jiffies %ld", 
full_path, direntry->d_inode, - direntry->d_inode->i_count.counter, direntry, - direntry->d_time, jiffies)); - - if (cifsInode->time == 0) { - /* was set to zero previously to force revalidate */ - } else if (time_before(jiffies, cifsInode->time + HZ) && - lookupCacheEnabled) { - if ((S_ISREG(direntry->d_inode->i_mode) == 0) || - (direntry->d_inode->i_nlink == 1)) { - kfree(full_path); - FreeXid(xid); - return rc; - } else { - cFYI(1, ("Have to revalidate file due to hardlinks")); - } - } - - /* save mtime and size */ - local_mtime = direntry->d_inode->i_mtime; - local_size = direntry->d_inode->i_size; - if (cifs_sb->tcon->unix_ext) { - rc = cifs_get_inode_info_unix(&direntry->d_inode, full_path, - direntry->d_sb, xid); - if (rc) { - cFYI(1, ("error on getting revalidate info %d", rc)); -/* if (rc != -ENOENT) - rc = 0; */ /* BB should we cache info on - certain errors? */ - } - } else { - rc = cifs_get_inode_info(&direntry->d_inode, full_path, NULL, - direntry->d_sb, xid, NULL); - if (rc) { - cFYI(1, ("error on getting revalidate info %d", rc)); -/* if (rc != -ENOENT) - rc = 0; */ /* BB should we cache info on - certain errors? */ - } - } - /* should we remap certain errors, access denied?, to zero */ - - /* if not oplocked, we invalidate inode pages if mtime or file size - had changed on server */ + cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld " + "jiffies %ld", full_path, inode, inode->i_count.counter, + dentry, dentry->d_time, jiffies)); - if (timespec_equal(&local_mtime, &direntry->d_inode->i_mtime) && - (local_size == direntry->d_inode->i_size)) { - cFYI(1, ("cifs_revalidate - inode unchanged")); - } else { - /* file may have changed on server */ - if (cifsInode->clientCanCacheRead) { - /* no need to invalidate inode pages since we were the - only ones who could have modified the file and the - server copy is staler than ours */ - } else { - invalidate_inode = true; - } - } + if (CIFS_SB(sb)->tcon->unix_ext) + rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid); + else + rc = cifs_get_inode_info(&inode, full_path, NULL, sb, + xid, NULL); - /* can not grab this sem since kernel filesys locking documentation - indicates i_mutex may be taken by the kernel on lookup and rename - which could deadlock if we grab the i_mutex here as well */ -/* mutex_lock(&direntry->d_inode->i_mutex);*/ - /* need to write out dirty pages here */ - if (direntry->d_inode->i_mapping) { - /* do we need to lock inode until after invalidate completes - below? 
*/ - wbrc = filemap_fdatawrite(direntry->d_inode->i_mapping); - if (wbrc) - CIFS_I(direntry->d_inode)->write_behind_rc = wbrc; - } - if (invalidate_inode) { - /* shrink_dcache not necessary now that cifs dentry ops - are exported for negative dentries */ -/* if (S_ISDIR(direntry->d_inode->i_mode)) - shrink_dcache_parent(direntry); */ - if (S_ISREG(direntry->d_inode->i_mode)) { - if (direntry->d_inode->i_mapping) { - wbrc = filemap_fdatawait(direntry->d_inode->i_mapping); - if (wbrc) - CIFS_I(direntry->d_inode)->write_behind_rc = wbrc; - } - /* may eventually have to do this for open files too */ - if (list_empty(&(cifsInode->openFileList))) { - /* changed on server - flush read ahead pages */ - cFYI(1, ("Invalidating read ahead data on " - "closed file")); - invalidate_remote_inode(direntry->d_inode); - } - } - } -/* mutex_unlock(&direntry->d_inode->i_mutex); */ +check_inval: + if (CIFS_I(inode)->invalid_mapping) + cifs_invalidate_mapping(inode); kfree(full_path); FreeXid(xid); @@ -1527,7 +1512,7 @@ int cifs_revalidate(struct dentry *direntry) int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - int err = cifs_revalidate(dentry); + int err = cifs_revalidate_dentry(dentry); if (!err) { generic_fillattr(dentry->d_inode, stat); stat->blksize = CIFS_MAX_MSGSIZE; -- cgit v1.2.3 From bcd5357f430363376565d07ca542127d6d36602c Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 12 Feb 2010 07:44:16 -0500 Subject: cifs: add a CIFSSMBQFileInfo function ...to get inode attributes via filehandle instead of by path. In some places, we need to revalidate an inode on an open filehandle, but we can't necessarily guarantee that the dentry associated with it will still be valid. When we have an open filehandle already, it makes more sense to do a filehandle based operation anyway. 
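As an illustration (not part of this patch), a caller holding an open filehandle could fetch attributes directly with the new routine; the helper name and surrounding context below are hypothetical, only the CIFSSMBQFileInfo() signature comes from the patch:

/*
 * Hypothetical caller sketch, not part of this patch: revalidate inode
 * attributes via an already-open filehandle instead of a path lookup.
 * Assumes xid/tcon/netfid come from an existing open (e.g. a cifsFileInfo).
 */
static int
cifs_example_qfileinfo(const int xid, struct cifsTconInfo *tcon, u16 netfid)
{
	FILE_ALL_INFO find_data;
	int rc;

	/* query all attributes by filehandle; no dentry or path is needed */
	rc = CIFSSMBQFileInfo(xid, tcon, netfid, &find_data);
	if (rc)
		cFYI(1, ("QFileInfo by filehandle failed, rc = %d", rc));

	return rc;
}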
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsproto.h | 2 ++ fs/cifs/cifssmb.c | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 88e2bc44ac58..bf2bff14c24f 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -142,6 +142,8 @@ extern int CIFSFindNext(const int xid, struct cifsTconInfo *tcon, extern int CIFSFindClose(const int, struct cifsTconInfo *tcon, const __u16 search_handle); +extern int CIFSSMBQFileInfo(const int xid, struct cifsTconInfo *tcon, + u16 netfid, FILE_ALL_INFO *pFindData); extern int CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon, const unsigned char *searchName, FILE_ALL_INFO *findData, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 9d17df3e0768..4ed97825db3d 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -3230,8 +3230,72 @@ QInfRetry: return rc; } +int +CIFSSMBQFileInfo(const int xid, struct cifsTconInfo *tcon, + u16 netfid, FILE_ALL_INFO *pFindData) +{ + struct smb_t2_qfi_req *pSMB = NULL; + struct smb_t2_qfi_rsp *pSMBr = NULL; + int rc = 0; + int bytes_returned; + __u16 params, byte_count; + +QFileInfoRetry: + rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, + (void **) &pSMBr); + if (rc) + return rc; + + params = 2 /* level */ + 2 /* fid */; + pSMB->t2.TotalDataCount = 0; + pSMB->t2.MaxParameterCount = cpu_to_le16(4); + /* BB find exact max data count below from sess structure BB */ + pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize); + pSMB->t2.MaxSetupCount = 0; + pSMB->t2.Reserved = 0; + pSMB->t2.Flags = 0; + pSMB->t2.Timeout = 0; + pSMB->t2.Reserved2 = 0; + pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req, + Fid) - 4); + pSMB->t2.DataCount = 0; + pSMB->t2.DataOffset = 0; + pSMB->t2.SetupCount = 1; + pSMB->t2.Reserved3 = 0; + pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION); + byte_count = params + 1 /* pad */ ; + pSMB->t2.TotalParameterCount = cpu_to_le16(params); + pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount; + pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_ALL_INFO); + pSMB->Pad = 0; + pSMB->Fid = netfid; + pSMB->hdr.smb_buf_length += byte_count; + + rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, + (struct smb_hdr *) pSMBr, &bytes_returned, 0); + if (rc) { + cFYI(1, ("Send error in QPathInfo = %d", rc)); + } else { /* decode response */ + rc = validate_t2((struct smb_t2_rsp *)pSMBr); + if (rc) /* BB add auto retry on EOPNOTSUPP? */ + rc = -EIO; + else if (pSMBr->ByteCount < 40) + rc = -EIO; /* bad smb */ + else if (pFindData) { + __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); + memcpy((char *) pFindData, + (char *) &pSMBr->hdr.Protocol + + data_offset, sizeof(FILE_ALL_INFO)); + } else + rc = -ENOMEM; + } + cifs_buf_release(pSMB); + if (rc == -EAGAIN) + goto QFileInfoRetry; + return rc; +} int CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon, -- cgit v1.2.3 From c8634fd3115497ac311f57be9c12f993437745cf Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 12 Feb 2010 07:44:17 -0500 Subject: cifs: add a CIFSSMBUnixQFileInfo function ...to allow us to get unix attrs via filehandle. 
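By analogy (illustrative only, not part of this patch), the unix variant is called the same way, filling a FILE_UNIX_BASIC_INFO buffer on a mount with unix extensions enabled; the wrapper name below is hypothetical:

/*
 * Hypothetical sketch, not part of this patch: same pattern as the
 * FILE_ALL_INFO case, but returning unix attributes by filehandle.
 */
static int
cifs_example_unix_qfileinfo(const int xid, struct cifsTconInfo *tcon,
			    u16 netfid, FILE_UNIX_BASIC_INFO *info)
{
	/* fetch unix attributes (mode, uid/gid, timestamps) by filehandle */
	return CIFSSMBUnixQFileInfo(xid, tcon, netfid, info);
}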
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsproto.h | 2 ++ fs/cifs/cifssmb.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+) diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index bf2bff14c24f..ce9199f0af9d 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -154,6 +154,8 @@ extern int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon, FILE_ALL_INFO *findData, const struct nls_table *nls_codepage, int remap); +extern int CIFSSMBUnixQFileInfo(const int xid, struct cifsTconInfo *tcon, + u16 netfid, FILE_UNIX_BASIC_INFO *pFindData); extern int CIFSSMBUnixQPathInfo(const int xid, struct cifsTconInfo *tcon, const unsigned char *searchName, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 4ed97825db3d..903d53871da7 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -3398,6 +3398,75 @@ QPathInfoRetry: return rc; } +int +CIFSSMBUnixQFileInfo(const int xid, struct cifsTconInfo *tcon, + u16 netfid, FILE_UNIX_BASIC_INFO *pFindData) +{ + struct smb_t2_qfi_req *pSMB = NULL; + struct smb_t2_qfi_rsp *pSMBr = NULL; + int rc = 0; + int bytes_returned; + __u16 params, byte_count; + +UnixQFileInfoRetry: + rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, + (void **) &pSMBr); + if (rc) + return rc; + + params = 2 /* level */ + 2 /* fid */; + pSMB->t2.TotalDataCount = 0; + pSMB->t2.MaxParameterCount = cpu_to_le16(4); + /* BB find exact max data count below from sess structure BB */ + pSMB->t2.MaxDataCount = cpu_to_le16(CIFSMaxBufSize); + pSMB->t2.MaxSetupCount = 0; + pSMB->t2.Reserved = 0; + pSMB->t2.Flags = 0; + pSMB->t2.Timeout = 0; + pSMB->t2.Reserved2 = 0; + pSMB->t2.ParameterOffset = cpu_to_le16(offsetof(struct smb_t2_qfi_req, + Fid) - 4); + pSMB->t2.DataCount = 0; + pSMB->t2.DataOffset = 0; + pSMB->t2.SetupCount = 1; + pSMB->t2.Reserved3 = 0; + pSMB->t2.SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION); + byte_count = params + 1 /* pad */ ; + pSMB->t2.TotalParameterCount = cpu_to_le16(params); + pSMB->t2.ParameterCount = pSMB->t2.TotalParameterCount; + pSMB->InformationLevel = cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC); + pSMB->Pad = 0; + pSMB->Fid = netfid; + pSMB->hdr.smb_buf_length += byte_count; + + rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, + (struct smb_hdr *) pSMBr, &bytes_returned, 0); + if (rc) { + cFYI(1, ("Send error in QPathInfo = %d", rc)); + } else { /* decode response */ + rc = validate_t2((struct smb_t2_rsp *)pSMBr); + + if (rc || (pSMBr->ByteCount < sizeof(FILE_UNIX_BASIC_INFO))) { + cERROR(1, ("Malformed FILE_UNIX_BASIC_INFO response.\n" + "Unix Extensions can be disabled on mount " + "by specifying the nosfu mount option.")); + rc = -EIO; /* bad smb */ + } else { + __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); + memcpy((char *) pFindData, + (char *) &pSMBr->hdr.Protocol + + data_offset, + sizeof(FILE_UNIX_BASIC_INFO)); + } + } + + cifs_buf_release(pSMB); + if (rc == -EAGAIN) + goto UnixQFileInfoRetry; + + return rc; +} + int CIFSSMBUnixQPathInfo(const int xid, struct cifsTconInfo *tcon, const unsigned char *searchName, -- cgit v1.2.3 From 0a20de446c76529028cb239bf2a13cb0f05b263a Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:34:20 -0800 Subject: [SCSI] bfa: IOC changes: Support faster recovery and split bfa_ioc.c into ASIC specific code. Add support for faster IOC recovery after failure. Split bfa_ioc.c into three files: bfa_ioc.c: Common code shared between crossbow and catapult ASIC's. 
bfa_ioc_cb.c: Code specific to the crossbow, reg mapping and interrupt related routines. bfa_ioc_ct.c: Code specific to the catapult, reg mapping and interrupt related routines. Fix to make sure IOC reinitialize's properly on enable request - update the ioc_fwstate reg with BFI_IOC_FAIL on ioc disable mbox cmd timeout. Makefile changes to support the 2 newly added files bfa_ioc_cb.c and bfa_ioc_ct.c. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/Makefile | 8 +- drivers/scsi/bfa/bfa_core.c | 10 + drivers/scsi/bfa/bfa_ioc.c | 546 +++++--------------------- drivers/scsi/bfa/bfa_ioc.h | 45 ++- drivers/scsi/bfa/bfa_ioc_cb.c | 273 +++++++++++++ drivers/scsi/bfa/bfa_ioc_ct.c | 422 ++++++++++++++++++++ drivers/scsi/bfa/include/bfa.h | 1 + drivers/scsi/bfa/include/bfa_timer.h | 2 +- drivers/scsi/bfa/include/bfi/bfi_cbreg.h | 3 +- drivers/scsi/bfa/include/bfi/bfi_ctreg.h | 2 + drivers/scsi/bfa/include/bfi/bfi_ioc.h | 2 +- drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h | 4 + 12 files changed, 859 insertions(+), 459 deletions(-) create mode 100644 drivers/scsi/bfa/bfa_ioc_cb.c create mode 100644 drivers/scsi/bfa/bfa_ioc_ct.c diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile index 1d6009490d1c..17e06cae71b2 100644 --- a/drivers/scsi/bfa/Makefile +++ b/drivers/scsi/bfa/Makefile @@ -2,14 +2,14 @@ obj-$(CONFIG_SCSI_BFA_FC) := bfa.o bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o -bfa-y += bfa_core.o bfa_ioc.o bfa_iocfc.o bfa_fcxp.o bfa_lps.o -bfa-y += bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o +bfa-y += bfa_core.o bfa_ioc.o bfa_ioc_ct.o bfa_ioc_cb.o bfa_iocfc.o bfa_fcxp.o +bfa-y += bfa_lps.o bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o bfa-y += bfa_itnim.o bfa_fcpim.o bfa_tskim.o bfa_log.o bfa_log_module.o bfa-y += bfa_csdebug.o bfa_sm.o plog.o -bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o +bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o bfa-y += bfa_fcs_uf.o bfa_fcs_lport.o fab.o fdmi.o ms.o ns.o scn.o loop.o bfa-y += lport_api.o n2n.o rport.o rport_api.o rport_ftrs.o vport.o -ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna +ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna -DBFA_PERF_BUILD diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 44e2d1155c51..72e3f2f63b2e 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -399,4 +399,14 @@ bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen) { return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen); } + +/** + * Reset hw semaphore & usage cnt regs and initialize. 
+ */ +void +bfa_chip_reset(struct bfa_s *bfa) +{ + bfa_ioc_ownership_reset(&bfa->ioc); + bfa_ioc_pll_init(&bfa->ioc); +} #endif diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 569b35d19a25..a5f9745315b1 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -33,12 +33,11 @@ BFA_TRC_FILE(HAL, IOC); * IOC local definitions */ #define BFA_IOC_TOV 2000 /* msecs */ -#define BFA_IOC_HB_TOV 1000 /* msecs */ -#define BFA_IOC_HB_FAIL_MAX 4 -#define BFA_IOC_HWINIT_MAX 2 +#define BFA_IOC_HWSEM_TOV 500 /* msecs */ +#define BFA_IOC_HB_TOV 500 /* msecs */ +#define BFA_IOC_HWINIT_MAX 2 #define BFA_IOC_FWIMG_MINSZ (16 * 1024) -#define BFA_IOC_TOV_RECOVER (BFA_IOC_HB_FAIL_MAX * BFA_IOC_HB_TOV \ - + BFA_IOC_TOV) +#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV #define bfa_ioc_timer_start(__ioc) \ bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \ @@ -51,11 +50,24 @@ BFA_TRC_FILE(HAL, IOC); (sizeof(struct bfa_trc_mod_s) - \ BFA_TRC_MAX * sizeof(struct bfa_trc_s))) #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) -#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) -#define BFA_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) -#define BFA_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) -#define BFA_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) +/** + * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. + */ + +#define bfa_ioc_firmware_lock(__ioc) \ + ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) +#define bfa_ioc_firmware_unlock(__ioc) \ + ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) +#define bfa_ioc_fwimg_get_chunk(__ioc, __off) \ + ((__ioc)->ioc_hwif->ioc_fwimg_get_chunk(__ioc, __off)) +#define bfa_ioc_fwimg_get_size(__ioc) \ + ((__ioc)->ioc_hwif->ioc_fwimg_get_size(__ioc)) +#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) +#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) +#define bfa_ioc_notify_hbfail(__ioc) \ + ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) + bfa_boolean_t bfa_auto_recover = BFA_TRUE; /* @@ -64,7 +76,6 @@ bfa_boolean_t bfa_auto_recover = BFA_TRUE; static void bfa_ioc_aen_post(struct bfa_ioc_s *bfa, enum bfa_ioc_aen_event event); static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); -static void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc); static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); static void bfa_ioc_timeout(void *ioc); @@ -77,8 +88,6 @@ static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force); static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); static void bfa_ioc_recover(struct bfa_ioc_s *ioc); -static bfa_boolean_t bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc); -static void bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc); static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); @@ -508,14 +517,19 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) bfa_trc(ioc, event); switch (event) { - case IOC_E_HWERROR: case IOC_E_FWRSP_DISABLE: + bfa_ioc_timer_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); + break; + + case IOC_E_HWERROR: bfa_ioc_timer_stop(ioc); /* * !!! fall through !!! 
*/ case IOC_E_TIMEOUT: + bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); break; @@ -608,15 +622,12 @@ bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc) * Mark IOC as failed in hardware and stop firmware. */ bfa_ioc_lpu_stop(ioc); - bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_HBFAIL); + bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); - if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) { - bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P); - /* - * Wait for halt to take effect - */ - bfa_reg_read(ioc->ioc_regs.ll_halt); - } + /** + * Notify other functions on HB failure. + */ + bfa_ioc_notify_hbfail(ioc); /** * Notify driver and common modules registered for notification. @@ -672,6 +683,12 @@ bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event) */ break; + case IOC_E_HWERROR: + /* + * HB failure notification, ignore. + */ + break; + default: bfa_sm_fault(ioc, event); } @@ -700,7 +717,7 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) } } -static void +void bfa_ioc_sem_timeout(void *ioc_arg) { struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg; @@ -708,26 +725,32 @@ bfa_ioc_sem_timeout(void *ioc_arg) bfa_ioc_hw_sem_get(ioc); } -static void -bfa_ioc_usage_sem_get(struct bfa_ioc_s *ioc) +bfa_boolean_t +bfa_ioc_sem_get(bfa_os_addr_t sem_reg) { - u32 r32; - int cnt = 0; -#define BFA_SEM_SPINCNT 1000 + u32 r32; + int cnt = 0; +#define BFA_SEM_SPINCNT 3000 - do { - r32 = bfa_reg_read(ioc->ioc_regs.ioc_usage_sem_reg); + r32 = bfa_reg_read(sem_reg); + + while (r32 && (cnt < BFA_SEM_SPINCNT)) { cnt++; - if (cnt > BFA_SEM_SPINCNT) - break; - } while (r32 != 0); + bfa_os_udelay(2); + r32 = bfa_reg_read(sem_reg); + } + + if (r32 == 0) + return BFA_TRUE; + bfa_assert(cnt < BFA_SEM_SPINCNT); + return BFA_FALSE; } -static void -bfa_ioc_usage_sem_release(struct bfa_ioc_s *ioc) +void +bfa_ioc_sem_release(bfa_os_addr_t sem_reg) { - bfa_reg_write(ioc->ioc_regs.ioc_usage_sem_reg, 1); + bfa_reg_write(sem_reg, 1); } static void @@ -737,7 +760,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) /** * First read to the semaphore register will return 0, subsequent reads - * will return 1. Semaphore is released by writing 0 to the register + * will return 1. Semaphore is released by writing 1 to the register */ r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); if (r32 == 0) { @@ -746,10 +769,10 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) } bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout, - ioc, BFA_IOC_TOV); + ioc, BFA_IOC_HWSEM_TOV); } -static void +void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc) { bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1); @@ -828,7 +851,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc) /** * Get driver and firmware versions. */ -static void +void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) { u32 pgnum, pgoff; @@ -847,24 +870,10 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) } } -static u32 * -bfa_ioc_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off) -{ - if (ioc->ctdev) - return bfi_image_ct_get_chunk(off); - return bfi_image_cb_get_chunk(off); -} - -static u32 -bfa_ioc_fwimg_get_size(struct bfa_ioc_s *ioc) -{ -return (ioc->ctdev) ? bfi_image_ct_size : bfi_image_cb_size; -} - /** * Returns TRUE if same. 
*/ -static bfa_boolean_t +bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) { struct bfi_ioc_image_hdr_s *drv_fwhdr; @@ -920,95 +929,6 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc) return bfa_ioc_fwver_cmp(ioc, &fwhdr); } -/** - * Return true if firmware of current driver matches the running firmware. - */ -static bfa_boolean_t -bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc) -{ - enum bfi_ioc_state ioc_fwstate; - u32 usecnt; - struct bfi_ioc_image_hdr_s fwhdr; - - /** - * Firmware match check is relevant only for CNA. - */ - if (!ioc->cna) - return BFA_TRUE; - - /** - * If bios boot (flash based) -- do not increment usage count - */ - if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) - return BFA_TRUE; - - bfa_ioc_usage_sem_get(ioc); - usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); - - /** - * If usage count is 0, always return TRUE. - */ - if (usecnt == 0) { - bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1); - bfa_ioc_usage_sem_release(ioc); - bfa_trc(ioc, usecnt); - return BFA_TRUE; - } - - ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); - bfa_trc(ioc, ioc_fwstate); - - /** - * Use count cannot be non-zero and chip in uninitialized state. - */ - bfa_assert(ioc_fwstate != BFI_IOC_UNINIT); - - /** - * Check if another driver with a different firmware is active - */ - bfa_ioc_fwver_get(ioc, &fwhdr); - if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) { - bfa_ioc_usage_sem_release(ioc); - bfa_trc(ioc, usecnt); - return BFA_FALSE; - } - - /** - * Same firmware version. Increment the reference count. - */ - usecnt++; - bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); - bfa_ioc_usage_sem_release(ioc); - bfa_trc(ioc, usecnt); - return BFA_TRUE; -} - -static void -bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc) -{ - u32 usecnt; - - /** - * Firmware lock is relevant only for CNA. - * If bios boot (flash based) -- do not decrement usage count - */ - if (!ioc->cna || (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)) - return; - - /** - * decrement usage count - */ - bfa_ioc_usage_sem_get(ioc); - usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); - bfa_assert(usecnt > 0); - - usecnt--; - bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); - bfa_trc(ioc, usecnt); - - bfa_ioc_usage_sem_release(ioc); -} - /** * Conditionally flush any pending message from firmware at start. 
*/ @@ -1152,33 +1072,27 @@ bfa_ioc_send_getattr(struct bfa_ioc_s *ioc) static void bfa_ioc_hb_check(void *cbarg) { - struct bfa_ioc_s *ioc = cbarg; - u32 hb_count; + struct bfa_ioc_s *ioc = cbarg; + u32 hb_count; hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); if (ioc->hb_count == hb_count) { - ioc->hb_fail++; - } else { - ioc->hb_count = hb_count; - ioc->hb_fail = 0; - } - - if (ioc->hb_fail >= BFA_IOC_HB_FAIL_MAX) { - bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, hb_count); - ioc->hb_fail = 0; + bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, + hb_count); bfa_ioc_recover(ioc); return; + } else { + ioc->hb_count = hb_count; } bfa_ioc_mbox_poll(ioc); - bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, - BFA_IOC_HB_TOV); + bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, + ioc, BFA_IOC_HB_TOV); } static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) { - ioc->hb_fail = 0; ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc, BFA_IOC_HB_TOV); @@ -1190,112 +1104,6 @@ bfa_ioc_hb_stop(struct bfa_ioc_s *ioc) bfa_timer_stop(&ioc->ioc_timer); } -/** - * Host to LPU mailbox message addresses - */ -static struct { - u32 hfn_mbox, lpu_mbox, hfn_pgn; -} iocreg_fnreg[] = { - { - HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0}, { - HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1}, { - HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2}, { - HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3} -}; - -/** - * Host <-> LPU mailbox command/status registers - port 0 - */ -static struct { - u32 hfn, lpu; -} iocreg_mbcmd_p0[] = { - { - HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT}, { - HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT}, { - HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT}, { - HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT} -}; - -/** - * Host <-> LPU mailbox command/status registers - port 1 - */ -static struct { - u32 hfn, lpu; -} iocreg_mbcmd_p1[] = { - { - HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT}, { - HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT}, { - HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT}, { - HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT} -}; - -/** - * Shared IRQ handling in INTX mode - */ -static struct { - u32 isr, msk; -} iocreg_shirq_next[] = { - { - HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK}, { - HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK}, { - HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK}, { -HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK},}; - -static void -bfa_ioc_reg_init(struct bfa_ioc_s *ioc) -{ - bfa_os_addr_t rb; - int pcifn = bfa_ioc_pcifn(ioc); - - rb = bfa_ioc_bar0(ioc); - - ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox; - ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox; - ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn; - - if (ioc->port_id == 0) { - ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; - ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; - ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn; - ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu; - ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; - } else { - ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); - ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); - ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn; - ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu; - ioc->ioc_regs.ll_halt = rb + 
FW_INIT_HALT_P1; - } - - /** - * Shared IRQ handling in INTX mode - */ - ioc->ioc_regs.shirq_isr_next = rb + iocreg_shirq_next[pcifn].isr; - ioc->ioc_regs.shirq_msk_next = rb + iocreg_shirq_next[pcifn].msk; - - /* - * PSS control registers - */ - ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); - ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG); - ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG); - - /* - * IOC semaphore registers and serialization - */ - ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); - ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); - ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); - - /** - * sram memory access - */ - ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); - ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB; - if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) - ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; -} - /** * Initiate a full firmware download. */ @@ -1332,17 +1140,17 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) { - if (BFA_FLASH_CHUNK_NO(i) != chunkno) { - chunkno = BFA_FLASH_CHUNK_NO(i); + if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { + chunkno = BFA_IOC_FLASH_CHUNK_NO(i); fwimg = bfa_ioc_fwimg_get_chunk(ioc, - BFA_FLASH_CHUNK_ADDR(chunkno)); + BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); } /** * write smem */ bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, - fwimg[BFA_FLASH_OFFSET_IN_CHUNK(i)]); + fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]); loff += sizeof(u32); @@ -1439,168 +1247,10 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc) bfa_q_deq(&mod->cmd_q, &cmd); } -/** - * Initialize IOC to port mapping. - */ - -#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) -static void -bfa_ioc_map_port(struct bfa_ioc_s *ioc) -{ - bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; - u32 r32; - - /** - * For crossbow, port id is same as pci function. 
- */ - if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT) { - ioc->port_id = bfa_ioc_pcifn(ioc); - return; - } - - /** - * For catapult, base port id on personality register and IOC type - */ - r32 = bfa_reg_read(rb + FNC_PERS_REG); - r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); - ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; - - bfa_trc(ioc, bfa_ioc_pcifn(ioc)); - bfa_trc(ioc, ioc->port_id); -} - - - /** * bfa_ioc_public */ -/** -* Set interrupt mode for a function: INTX or MSIX - */ -void -bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) -{ - bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; - u32 r32, mode; - - r32 = bfa_reg_read(rb + FNC_PERS_REG); - bfa_trc(ioc, r32); - - mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & - __F0_INTX_STATUS; - - /** - * If already in desired mode, do not change anything - */ - if (!msix && mode) - return; - - if (msix) - mode = __F0_INTX_STATUS_MSIX; - else - mode = __F0_INTX_STATUS_INTA; - - r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); - r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); - bfa_trc(ioc, r32); - - bfa_reg_write(rb + FNC_PERS_REG, r32); -} - -bfa_status_t -bfa_ioc_pll_init(struct bfa_ioc_s *ioc) -{ - bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; - u32 pll_sclk, pll_fclk, r32; - - if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) { - pll_sclk = - __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN | - __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(0U) | - __APP_PLL_312_JITLMT0_1(3U) | - __APP_PLL_312_CNTLMT0_1(1U); - pll_fclk = - __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN | - __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(0U) | - __APP_PLL_425_JITLMT0_1(3U) | - __APP_PLL_425_CNTLMT0_1(1U); - - /** - * For catapult, choose operational mode FC/FCoE - */ - if (ioc->fcmode) { - bfa_reg_write((rb + OP_MODE), 0); - bfa_reg_write((rb + ETH_MAC_SER_REG), - __APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 - | __APP_EMS_CHANNEL_SEL); - } else { - ioc->pllinit = BFA_TRUE; - bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE); - bfa_reg_write((rb + ETH_MAC_SER_REG), - __APP_EMS_REFCKBUFEN1); - } - } else { - pll_sclk = - __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN | - __APP_PLL_312_P0_1(3U) | __APP_PLL_312_JITLMT0_1(3U) | - __APP_PLL_312_CNTLMT0_1(3U); - pll_fclk = - __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN | - __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) | - __APP_PLL_425_JITLMT0_1(3U) | - __APP_PLL_425_CNTLMT0_1(3U); - } - - bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); - bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); - - bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); - bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); - bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); - bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); - bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); - bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); - - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, - __APP_PLL_312_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, - __APP_PLL_312_BYPASS | __APP_PLL_312_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, - __APP_PLL_425_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, - __APP_PLL_425_BYPASS | __APP_PLL_425_LOGIC_SOFT_RESET); - bfa_os_udelay(2); - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, - __APP_PLL_312_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, - __APP_PLL_425_LOGIC_SOFT_RESET); - - 
bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, - pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, - pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET); - - /** - * Wait for PLLs to lock. - */ - bfa_os_udelay(2000); - bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); - bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); - - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk); - - if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) { - bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START); - bfa_os_udelay(1000); - r32 = bfa_reg_read((rb + MBIST_STAT_REG)); - bfa_trc(ioc, r32); - } - - return BFA_STATUS_OK; -} - /** * Interface used by diag module to do firmware boot with memory test * as the entry vector. @@ -1764,6 +1414,14 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT); ioc->cna = ioc->ctdev && !ioc->fcmode; + /** + * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c + */ + if (ioc->ctdev) + bfa_ioc_set_ct_hwif(ioc); + else + bfa_ioc_set_cb_hwif(ioc); + bfa_ioc_map_port(ioc); bfa_ioc_reg_init(ioc); } @@ -1973,7 +1631,7 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) ((__sm) == BFI_IOC_INITING) || \ ((__sm) == BFI_IOC_HWINIT) || \ ((__sm) == BFI_IOC_DISABLED) || \ - ((__sm) == BFI_IOC_HBFAIL) || \ + ((__sm) == BFI_IOC_FAIL) || \ ((__sm) == BFI_IOC_CFG_DISABLED)) /** @@ -2194,29 +1852,6 @@ bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc) return ioc->fcmode || (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT); } -/** - * Return true if interrupt should be claimed. - */ -bfa_boolean_t -bfa_ioc_intx_claim(struct bfa_ioc_s *ioc) -{ - u32 isr, msk; - - /** - * Always claim if not catapult. - */ - if (!ioc->ctdev) - return BFA_TRUE; - - /** - * FALSE if next device is claiming interrupt. - * TRUE if next device is not interrupting or not present. - */ - msk = bfa_reg_read(ioc->ioc_regs.shirq_msk_next); - isr = bfa_reg_read(ioc->ioc_regs.shirq_isr_next); - return !(isr & ~msk); -} - /** * Send AEN notification */ @@ -2304,6 +1939,13 @@ bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) pgnum = bfa_ioc_smem_pgnum(ioc, loff); loff = bfa_ioc_smem_pgoff(ioc, loff); + + /* + * Hold semaphore to serialize pll init and fwtrc. + */ + if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) + return BFA_STATUS_FAILED; + bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); tlen = *trclen; @@ -2329,6 +1971,12 @@ bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) } bfa_reg_write(ioc->ioc_regs.host_page_num_fn, bfa_ioc_smem_pgnum(ioc, 0)); + + /* + * release semaphore. 
+ */ + bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); + bfa_trc(ioc, pgnum); *trclen = tlen * sizeof(u32); diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 7c30f05ab137..1633a50187f7 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -78,11 +78,13 @@ struct bfa_ioc_regs_s { bfa_os_addr_t app_pll_slow_ctl_reg; bfa_os_addr_t ioc_sem_reg; bfa_os_addr_t ioc_usage_sem_reg; + bfa_os_addr_t ioc_init_sem_reg; bfa_os_addr_t ioc_usage_reg; bfa_os_addr_t host_page_num_fn; bfa_os_addr_t heartbeat; bfa_os_addr_t ioc_fwstate; bfa_os_addr_t ll_halt; + bfa_os_addr_t err_set; bfa_os_addr_t shirq_isr_next; bfa_os_addr_t shirq_msk_next; bfa_os_addr_t smem_page_start; @@ -154,7 +156,6 @@ struct bfa_ioc_s { struct bfa_timer_s ioc_timer; struct bfa_timer_s sem_timer; u32 hb_count; - u32 hb_fail; u32 retry_count; struct list_head hb_notify_q; void *dbg_fwsave; @@ -177,6 +178,22 @@ struct bfa_ioc_s { struct bfi_ioc_attr_s *attr; struct bfa_ioc_cbfn_s *cbfn; struct bfa_ioc_mbox_mod_s mbox_mod; + struct bfa_ioc_hwif_s *ioc_hwif; +}; + +struct bfa_ioc_hwif_s { + bfa_status_t (*ioc_pll_init) (struct bfa_ioc_s *ioc); + bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc); + void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc); + u32 * (*ioc_fwimg_get_chunk) (struct bfa_ioc_s *ioc, + u32 off); + u32 (*ioc_fwimg_get_size) (struct bfa_ioc_s *ioc); + void (*ioc_reg_init) (struct bfa_ioc_s *ioc); + void (*ioc_map_port) (struct bfa_ioc_s *ioc); + void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc, + bfa_boolean_t msix); + void (*ioc_notify_hbfail) (struct bfa_ioc_s *ioc); + void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc); }; #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) @@ -191,6 +208,15 @@ struct bfa_ioc_s { #define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) #define bfa_ioc_speed_sup(__ioc) \ BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) +#define bfa_ioc_get_nports(__ioc) \ + BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop) + +#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) +#define BFA_IOC_FWIMG_MINSZ (16 * 1024) + +#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) +#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) +#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) /** * IOC mailbox interface @@ -207,6 +233,14 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, /** * IOC interfaces */ +#define bfa_ioc_pll_init(__ioc) ((__ioc)->ioc_hwif->ioc_pll_init(__ioc)) +#define bfa_ioc_isr_mode_set(__ioc, __msix) \ + ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)) +#define bfa_ioc_ownership_reset(__ioc) \ + ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc)) + +void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc); +void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc); void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod, @@ -223,8 +257,6 @@ bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc); void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param); void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg); void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); -void bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t intx); -bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); bfa_boolean_t 
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); @@ -245,6 +277,13 @@ void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc); void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, struct bfa_ioc_hbfail_notify_s *notify); +bfa_boolean_t bfa_ioc_sem_get(bfa_os_addr_t sem_reg); +void bfa_ioc_sem_release(bfa_os_addr_t sem_reg); +void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc); +void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, + struct bfi_ioc_image_hdr_s *fwhdr); +bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, + struct bfi_ioc_image_hdr_s *fwhdr); /* * bfa mfg wwn API functions diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c new file mode 100644 index 000000000000..d1d625bcd721 --- /dev/null +++ b/drivers/scsi/bfa/bfa_ioc_cb.c @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +BFA_TRC_FILE(CNA, IOC_CB); + +/* + * forward declarations + */ +static bfa_status_t bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc); +static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc); +static u32 *bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off); +static u32 bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); +static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc); +static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc); + +struct bfa_ioc_hwif_s hwif_cb = { + bfa_ioc_cb_pll_init, + bfa_ioc_cb_firmware_lock, + bfa_ioc_cb_firmware_unlock, + bfa_ioc_cb_fwimg_get_chunk, + bfa_ioc_cb_fwimg_get_size, + bfa_ioc_cb_reg_init, + bfa_ioc_cb_map_port, + bfa_ioc_cb_isr_mode_set, + bfa_ioc_cb_notify_hbfail, + bfa_ioc_cb_ownership_reset, +}; + +/** + * Called from bfa_ioc_attach() to map asic specific calls. + */ +void +bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc) +{ + ioc->ioc_hwif = &hwif_cb; +} + +static uint32_t * +bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, uint32_t off) +{ + return bfi_image_cb_get_chunk(off); +} + +static uint32_t +bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc) +{ + return bfi_image_cb_size; +} + +/** + * Return true if firmware of current driver matches the running firmware. + */ +static bfa_boolean_t +bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc) +{ + return BFA_TRUE; +} + +static void +bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc) +{ +} + +/** + * Notify other functions on HB failure. 
+ */ +static void +bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc) +{ + bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET); + bfa_reg_read(ioc->ioc_regs.err_set); +} + +/** + * Host to LPU mailbox message addresses + */ +static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { + { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, + { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 } +}; + +/** + * Host <-> LPU mailbox command/status registers + */ +static struct { uint32_t hfn, lpu; } iocreg_mbcmd[] = { + { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, + { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT } +}; + +static void +bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc) +{ + bfa_os_addr_t rb; + int pcifn = bfa_ioc_pcifn(ioc); + + rb = bfa_ioc_bar0(ioc); + + ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox; + ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox; + ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn; + + if (ioc->port_id == 0) { + ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; + } else { + ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); + ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); + } + + /** + * Host <-> LPU mailbox command/status registers + */ + ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd[pcifn].lpu; + + /* + * PSS control registers + */ + ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); + ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_400_CTL_REG); + ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_212_CTL_REG); + + /* + * IOC semaphore registers and serialization + */ + ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); + ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); + + /** + * sram memory access + */ + ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); + ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB; + + /* + * err set reg : for notification of hb failure + */ + ioc->ioc_regs.err_set = (rb + ERR_SET_REG); +} + +/** + * Initialize IOC to port mapping. + */ +static void +bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc) +{ + /** + * For crossbow, port id is same as pci function. + */ + ioc->port_id = bfa_ioc_pcifn(ioc); + bfa_trc(ioc, ioc->port_id); +} + +/** + * Set interrupt mode for a function: INTX or MSIX + */ +static void +bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) +{ +} + +static bfa_status_t +bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc) +{ + bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; + uint32_t pll_sclk, pll_fclk; + + /* + * Hold semaphore so that nobody can access the chip during init. 
+ */ + bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); + + pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN | + __APP_PLL_212_P0_1(3U) | + __APP_PLL_212_JITLMT0_1(3U) | + __APP_PLL_212_CNTLMT0_1(3U); + pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN | + __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) | + __APP_PLL_400_JITLMT0_1(3U) | + __APP_PLL_400_CNTLMT0_1(3U); + + bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); + bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); + + bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); + bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); + bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); + bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); + bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); + bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); + + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, + __APP_PLL_212_LOGIC_SOFT_RESET); + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, + __APP_PLL_212_BYPASS | + __APP_PLL_212_LOGIC_SOFT_RESET); + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, + __APP_PLL_400_LOGIC_SOFT_RESET); + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, + __APP_PLL_400_BYPASS | + __APP_PLL_400_LOGIC_SOFT_RESET); + bfa_os_udelay(2); + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, + __APP_PLL_212_LOGIC_SOFT_RESET); + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, + __APP_PLL_400_LOGIC_SOFT_RESET); + + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, + pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET); + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, + pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET); + + /** + * Wait for PLLs to lock. + */ + bfa_os_udelay(2000); + bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); + bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); + + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk); + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk); + + /* + * release semaphore. + */ + bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); + + return BFA_STATUS_OK; +} + +/** + * Cleanup hw semaphore and usecnt registers + */ +static void +bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc) +{ + + /* + * Read the hw sem reg to make sure that it is locked + * before we clear it. If it is not locked, writing 1 + * will lock it instead of clearing it. + */ + bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); + bfa_ioc_hw_sem_release(ioc); +} diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c new file mode 100644 index 000000000000..5de9c24efacf --- /dev/null +++ b/drivers/scsi/bfa/bfa_ioc_ct.c @@ -0,0 +1,422 @@ +/* + * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. + * All rights reserved + * www.brocade.com + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +BFA_TRC_FILE(CNA, IOC_CT); + +/* + * forward declarations + */ +static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc); +static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc); +static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc); +static uint32_t* bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc, + uint32_t off); +static uint32_t bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc); +static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc); +static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc); +static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); +static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc); +static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc); + +struct bfa_ioc_hwif_s hwif_ct = { + bfa_ioc_ct_pll_init, + bfa_ioc_ct_firmware_lock, + bfa_ioc_ct_firmware_unlock, + bfa_ioc_ct_fwimg_get_chunk, + bfa_ioc_ct_fwimg_get_size, + bfa_ioc_ct_reg_init, + bfa_ioc_ct_map_port, + bfa_ioc_ct_isr_mode_set, + bfa_ioc_ct_notify_hbfail, + bfa_ioc_ct_ownership_reset, +}; + +/** + * Called from bfa_ioc_attach() to map asic specific calls. + */ +void +bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc) +{ + ioc->ioc_hwif = &hwif_ct; +} + +static uint32_t* +bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc, uint32_t off) +{ + return bfi_image_ct_get_chunk(off); +} + +static uint32_t +bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc) +{ + return bfi_image_ct_size; +} + +/** + * Return true if firmware of current driver matches the running firmware. + */ +static bfa_boolean_t +bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) +{ + enum bfi_ioc_state ioc_fwstate; + uint32_t usecnt; + struct bfi_ioc_image_hdr_s fwhdr; + + /** + * Firmware match check is relevant only for CNA. + */ + if (!ioc->cna) + return BFA_TRUE; + + /** + * If bios boot (flash based) -- do not increment usage count + */ + if (bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) + return BFA_TRUE; + + bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); + + /** + * If usage count is 0, always return TRUE. + */ + if (usecnt == 0) { + bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1); + bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + bfa_trc(ioc, usecnt); + return BFA_TRUE; + } + + ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); + bfa_trc(ioc, ioc_fwstate); + + /** + * Use count cannot be non-zero and chip in uninitialized state. + */ + bfa_assert(ioc_fwstate != BFI_IOC_UNINIT); + + /** + * Check if another driver with a different firmware is active + */ + bfa_ioc_fwver_get(ioc, &fwhdr); + if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) { + bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + bfa_trc(ioc, usecnt); + return BFA_FALSE; + } + + /** + * Same firmware version. Increment the reference count. + */ + usecnt++; + bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); + bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + bfa_trc(ioc, usecnt); + return BFA_TRUE; +} + +static void +bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc) +{ + uint32_t usecnt; + + /** + * Firmware lock is relevant only for CNA. 
+ * If bios boot (flash based) -- do not decrement usage count + */ + if (!ioc->cna || bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) + return; + + /** + * decrement usage count + */ + bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); + bfa_assert(usecnt > 0); + + usecnt--; + bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); + bfa_trc(ioc, usecnt); + + bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); +} + +/** + * Notify other functions on HB failure. + */ +static void +bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc) +{ + + bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P); + /* Wait for halt to take effect */ + bfa_reg_read(ioc->ioc_regs.ll_halt); +} + +/** + * Host to LPU mailbox message addresses + */ +static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { + { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, + { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, + { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 }, + { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } +}; + +/** + * Host <-> LPU mailbox command/status registers - port 0 + */ +static struct { uint32_t hfn, lpu; } iocreg_mbcmd_p0[] = { + { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT }, + { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT }, + { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT }, + { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT } +}; + +/** + * Host <-> LPU mailbox command/status registers - port 1 + */ +static struct { uint32_t hfn, lpu; } iocreg_mbcmd_p1[] = { + { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT }, + { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT }, + { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT }, + { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT } +}; + +static void +bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) +{ + bfa_os_addr_t rb; + int pcifn = bfa_ioc_pcifn(ioc); + + rb = bfa_ioc_bar0(ioc); + + ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox; + ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox; + ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn; + + if (ioc->port_id == 0) { + ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; + ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; + } else { + ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); + ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); + ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; + } + + /* + * PSS control registers + */ + ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); + ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG); + ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG); + + /* + * IOC semaphore registers and serialization + */ + ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); + ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); + ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); + ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); + + /** + * sram memory access + */ + ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); + ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; +} + +/** + * Initialize IOC to 
port mapping. + */ + +#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) +static void +bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc) +{ + bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; + uint32_t r32; + + /** + * For catapult, base port id on personality register and IOC type + */ + r32 = bfa_reg_read(rb + FNC_PERS_REG); + r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); + ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; + + bfa_trc(ioc, bfa_ioc_pcifn(ioc)); + bfa_trc(ioc, ioc->port_id); +} + +/** + * Set interrupt mode for a function: INTX or MSIX + */ +static void +bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) +{ + bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; + uint32_t r32, mode; + + r32 = bfa_reg_read(rb + FNC_PERS_REG); + bfa_trc(ioc, r32); + + mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & + __F0_INTX_STATUS; + + /** + * If already in desired mode, do not change anything + */ + if (!msix && mode) + return; + + if (msix) + mode = __F0_INTX_STATUS_MSIX; + else + mode = __F0_INTX_STATUS_INTA; + + r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); + r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); + bfa_trc(ioc, r32); + + bfa_reg_write(rb + FNC_PERS_REG, r32); +} + +static bfa_status_t +bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc) +{ + bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; + uint32_t pll_sclk, pll_fclk, r32; + + /* + * Hold semaphore so that nobody can access the chip during init. + */ + bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); + + pll_sclk = __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN | + __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(0U) | + __APP_PLL_312_JITLMT0_1(3U) | + __APP_PLL_312_CNTLMT0_1(1U); + pll_fclk = __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN | + __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(0U) | + __APP_PLL_425_JITLMT0_1(3U) | + __APP_PLL_425_CNTLMT0_1(1U); + + /** + * For catapult, choose operational mode FC/FCoE + */ + if (ioc->fcmode) { + bfa_reg_write((rb + OP_MODE), 0); + bfa_reg_write((rb + ETH_MAC_SER_REG), + __APP_EMS_CMLCKSEL | + __APP_EMS_REFCKBUFEN2 | + __APP_EMS_CHANNEL_SEL); + } else { + ioc->pllinit = BFA_TRUE; + bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE); + bfa_reg_write((rb + ETH_MAC_SER_REG), + __APP_EMS_REFCKBUFEN1); + } + + bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); + bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); + + bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); + bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); + bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); + bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); + bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); + bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); + + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, + __APP_PLL_312_LOGIC_SOFT_RESET); + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, + __APP_PLL_312_BYPASS | + __APP_PLL_312_LOGIC_SOFT_RESET); + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, + __APP_PLL_425_LOGIC_SOFT_RESET); + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, + __APP_PLL_425_BYPASS | + __APP_PLL_425_LOGIC_SOFT_RESET); + bfa_os_udelay(2); + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, + __APP_PLL_312_LOGIC_SOFT_RESET); + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, + __APP_PLL_425_LOGIC_SOFT_RESET); + + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, + pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET); + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, + pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET); + + /** + * Wait for PLLs to lock. 
+ */ + bfa_os_udelay(2000); + bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); + bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); + + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk); + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk); + + bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START); + bfa_os_udelay(1000); + r32 = bfa_reg_read((rb + MBIST_STAT_REG)); + bfa_trc(ioc, r32); + /* + * release semaphore. + */ + bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); + + return BFA_STATUS_OK; +} + +/** + * Cleanup hw semaphore and usecnt registers + */ +static void +bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) +{ + + if (ioc->cna) { + bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0); + bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + } + + /* + * Read the hw sem reg to make sure that it is locked + * before we clear it. If it is not locked, writing 1 + * will lock it instead of clearing it. + */ + bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); + bfa_ioc_hw_sem_release(ioc); +} diff --git a/drivers/scsi/bfa/include/bfa.h b/drivers/scsi/bfa/include/bfa.h index d4bc0d9fa42c..942ae64038c9 100644 --- a/drivers/scsi/bfa/include/bfa.h +++ b/drivers/scsi/bfa/include/bfa.h @@ -161,6 +161,7 @@ bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa, void bfa_iocfc_enable(struct bfa_s *bfa); void bfa_iocfc_disable(struct bfa_s *bfa); void bfa_ioc_auto_recover(bfa_boolean_t auto_recover); +void bfa_chip_reset(struct bfa_s *bfa); void bfa_cb_ioc_disable(void *bfad); void bfa_timer_tick(struct bfa_s *bfa); #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ diff --git a/drivers/scsi/bfa/include/bfa_timer.h b/drivers/scsi/bfa/include/bfa_timer.h index e407103fa565..f71087448222 100644 --- a/drivers/scsi/bfa/include/bfa_timer.h +++ b/drivers/scsi/bfa/include/bfa_timer.h @@ -41,7 +41,7 @@ struct bfa_timer_mod_s { struct list_head timer_q; }; -#define BFA_TIMER_FREQ 500 /**< specified in millisecs */ +#define BFA_TIMER_FREQ 200 /**< specified in millisecs */ void bfa_timer_beat(struct bfa_timer_mod_s *mod); void bfa_timer_init(struct bfa_timer_mod_s *mod); diff --git a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h b/drivers/scsi/bfa/include/bfi/bfi_cbreg.h index b3bb52b565b1..781cefafb659 100644 --- a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h +++ b/drivers/scsi/bfa/include/bfi/bfi_cbreg.h @@ -177,7 +177,8 @@ #define __PSS_LMEM_INIT_EN 0x00000100 #define __PSS_LPU1_RESET 0x00000002 #define __PSS_LPU0_RESET 0x00000001 - +#define ERR_SET_REG 0x00018818 +#define __PSS_ERR_STATUS_SET 0x00000fff /* * These definitions are either in error/missing in spec. 
Its auto-generated diff --git a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h index dd2992c38afb..d84ebae70cb4 100644 --- a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h +++ b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h @@ -430,6 +430,8 @@ enum { #define __PSS_LMEM_INIT_EN 0x00000100 #define __PSS_LPU1_RESET 0x00000002 #define __PSS_LPU0_RESET 0x00000001 +#define ERR_SET_REG 0x00018818 +#define __PSS_ERR_STATUS_SET 0x003fffff #define HQM_QSET0_RXQ_DRBL_P0 0x00038000 #define __RXQ0_ADD_VECTORS_P 0x80000000 #define __RXQ0_STOP_P 0x40000000 diff --git a/drivers/scsi/bfa/include/bfi/bfi_ioc.h b/drivers/scsi/bfa/include/bfi/bfi_ioc.h index 96ef05670659..a0158aac0024 100644 --- a/drivers/scsi/bfa/include/bfi/bfi_ioc.h +++ b/drivers/scsi/bfa/include/bfi/bfi_ioc.h @@ -123,7 +123,7 @@ enum bfi_ioc_state { BFI_IOC_DISABLING = 5, /* IOC is being disabled */ BFI_IOC_DISABLED = 6, /* IOC is disabled */ BFI_IOC_CFG_DISABLED = 7, /* IOC is being disabled;transient */ - BFI_IOC_HBFAIL = 8, /* IOC heart-beat failure */ + BFI_IOC_FAIL = 8, /* IOC heart-beat failure */ BFI_IOC_MEMTEST = 9, /* IOC is doing memtest */ }; diff --git a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h b/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h index 43ba7064e81a..a75a1f3be315 100644 --- a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h +++ b/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h @@ -31,6 +31,10 @@ enum { BFA_TRC_CNA_CEE = 1, BFA_TRC_CNA_PORT = 2, + BFA_TRC_CNA_IOC = 3, + BFA_TRC_CNA_DIAG = 4, + BFA_TRC_CNA_IOC_CB = 5, + BFA_TRC_CNA_IOC_CT = 6, }; #endif /* __BFA_CNA_TRCMOD_H__ */ -- cgit v1.2.3 From 8b651b4294e67789028982d18779a9ebe75c2b8a Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:34:44 -0800 Subject: [SCSI] bfa: Clear LL_HALT and PSS_ERR bit when IOC crashes. Clear LL_HALT and PSS_ERR bit in the interrupt status register on an IOC crash. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_intr.c | 35 ++++++++++++++++++++++++++++---- drivers/scsi/bfa/bfa_ioc.h | 1 + drivers/scsi/bfa/bfa_ioc_cb.c | 1 + drivers/scsi/bfa/bfa_ioc_ct.c | 1 + drivers/scsi/bfa/include/bfi/bfi_cbreg.h | 13 ++++++++++++ drivers/scsi/bfa/include/bfi/bfi_ctreg.h | 23 +++++++++++++++++++++ 6 files changed, 70 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c index ab463db11144..c42254613f73 100644 --- a/drivers/scsi/bfa/bfa_intr.c +++ b/drivers/scsi/bfa/bfa_intr.c @@ -197,17 +197,44 @@ bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid) void bfa_msix_lpu_err(struct bfa_s *bfa, int vec) { - u32 intr; + u32 intr, curr_value; intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) bfa_msix_lpu(bfa); - if (intr & (__HFN_INT_ERR_EMC | - __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | - __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT)) + intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | + __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT); + + if (intr) { + if (intr & __HFN_INT_LL_HALT) { + /** + * If LL_HALT bit is set then FW Init Halt LL Port + * Register needs to be cleared as well so Interrupt + * Status Register will be cleared. 
+ */ + curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt); + curr_value &= ~__FW_INIT_HALT_P; + bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value); + } + + if (intr & __HFN_INT_ERR_PSS) { + /** + * ERR_PSS bit needs to be cleared as well in case + * interrups are shared so driver's interrupt handler is + * still called eventhough it is already masked out. + */ + curr_value = bfa_reg_read( + bfa->ioc.ioc_regs.pss_err_status_reg); + curr_value &= __PSS_ERR_STATUS_SET; + bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg, + curr_value); + } + + bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr); bfa_msix_errint(bfa, intr); + } } void diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 1633a50187f7..853cc3136f0e 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -74,6 +74,7 @@ struct bfa_ioc_regs_s { bfa_os_addr_t lpu_mbox_cmd; bfa_os_addr_t lpu_mbox; bfa_os_addr_t pss_ctl_reg; + bfa_os_addr_t pss_err_status_reg; bfa_os_addr_t app_pll_fast_ctl_reg; bfa_os_addr_t app_pll_slow_ctl_reg; bfa_os_addr_t ioc_sem_reg; diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c index d1d625bcd721..1fa052ef9ce0 100644 --- a/drivers/scsi/bfa/bfa_ioc_cb.c +++ b/drivers/scsi/bfa/bfa_ioc_cb.c @@ -145,6 +145,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc) * PSS control registers */ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); + ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_400_CTL_REG); ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_212_CTL_REG); diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c index 5de9c24efacf..0430edd2e011 100644 --- a/drivers/scsi/bfa/bfa_ioc_ct.c +++ b/drivers/scsi/bfa/bfa_ioc_ct.c @@ -237,6 +237,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) * PSS control registers */ ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG); + ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG); ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG); ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG); diff --git a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h b/drivers/scsi/bfa/include/bfi/bfi_cbreg.h index 781cefafb659..a51ee61ddb19 100644 --- a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h +++ b/drivers/scsi/bfa/include/bfi/bfi_cbreg.h @@ -177,6 +177,19 @@ #define __PSS_LMEM_INIT_EN 0x00000100 #define __PSS_LPU1_RESET 0x00000002 #define __PSS_LPU0_RESET 0x00000001 +#define PSS_ERR_STATUS_REG 0x00018810 +#define __PSS_LMEM1_CORR_ERR 0x00000800 +#define __PSS_LMEM0_CORR_ERR 0x00000400 +#define __PSS_LMEM1_UNCORR_ERR 0x00000200 +#define __PSS_LMEM0_UNCORR_ERR 0x00000100 +#define __PSS_BAL_PERR 0x00000080 +#define __PSS_DIP_IF_ERR 0x00000040 +#define __PSS_IOH_IF_ERR 0x00000020 +#define __PSS_TDS_IF_ERR 0x00000010 +#define __PSS_RDS_IF_ERR 0x00000008 +#define __PSS_SGM_IF_ERR 0x00000004 +#define __PSS_LPU1_RAM_ERR 0x00000002 +#define __PSS_LPU0_RAM_ERR 0x00000001 #define ERR_SET_REG 0x00018818 #define __PSS_ERR_STATUS_SET 0x00000fff diff --git a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h index d84ebae70cb4..57a8497105af 100644 --- a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h +++ b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h @@ -430,6 +430,29 @@ enum { #define __PSS_LMEM_INIT_EN 0x00000100 #define __PSS_LPU1_RESET 0x00000002 #define __PSS_LPU0_RESET 0x00000001 +#define PSS_ERR_STATUS_REG 0x00018810 +#define __PSS_LPU1_TCM_READ_ERR 0x00200000 +#define __PSS_LPU0_TCM_READ_ERR 0x00100000 
+#define __PSS_LMEM5_CORR_ERR 0x00080000 +#define __PSS_LMEM4_CORR_ERR 0x00040000 +#define __PSS_LMEM3_CORR_ERR 0x00020000 +#define __PSS_LMEM2_CORR_ERR 0x00010000 +#define __PSS_LMEM1_CORR_ERR 0x00008000 +#define __PSS_LMEM0_CORR_ERR 0x00004000 +#define __PSS_LMEM5_UNCORR_ERR 0x00002000 +#define __PSS_LMEM4_UNCORR_ERR 0x00001000 +#define __PSS_LMEM3_UNCORR_ERR 0x00000800 +#define __PSS_LMEM2_UNCORR_ERR 0x00000400 +#define __PSS_LMEM1_UNCORR_ERR 0x00000200 +#define __PSS_LMEM0_UNCORR_ERR 0x00000100 +#define __PSS_BAL_PERR 0x00000080 +#define __PSS_DIP_IF_ERR 0x00000040 +#define __PSS_IOH_IF_ERR 0x00000020 +#define __PSS_TDS_IF_ERR 0x00000010 +#define __PSS_RDS_IF_ERR 0x00000008 +#define __PSS_SGM_IF_ERR 0x00000004 +#define __PSS_LPU1_RAM_ERR 0x00000002 +#define __PSS_LPU0_RAM_ERR 0x00000001 #define ERR_SET_REG 0x00018818 #define __PSS_ERR_STATUS_SET 0x003fffff #define HQM_QSET0_RXQ_DRBL_P0 0x00038000 -- cgit v1.2.3 From e641de37e67953fa9ecad72608942481a5d66a1d Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:35:02 -0800 Subject: [SCSI] bfa: Replace bfa_assert() with bfa_sm_fault() Replace bfa_assert() with bfa_sm_fault() to get unhandled events for debugging. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_fcs_lport.c | 10 ++++----- drivers/scsi/bfa/bfa_ioim.c | 22 ++++++++++---------- drivers/scsi/bfa/bfa_itnim.c | 30 +++++++++++++-------------- drivers/scsi/bfa/bfa_lps.c | 12 +++++------ drivers/scsi/bfa/bfa_rport.c | 26 ++++++++++++------------ drivers/scsi/bfa/bfa_tskim.c | 14 ++++++------- drivers/scsi/bfa/fcpim.c | 16 +++++++-------- drivers/scsi/bfa/fdmi.c | 22 ++++++++++---------- drivers/scsi/bfa/ms.c | 22 ++++++++++---------- drivers/scsi/bfa/ns.c | 34 +++++++++++++++---------------- drivers/scsi/bfa/rport.c | 44 ++++++++++++++++++++-------------------- drivers/scsi/bfa/rport_ftrs.c | 12 +++++------ drivers/scsi/bfa/scn.c | 10 ++++----- drivers/scsi/bfa/vport.c | 18 ++++++++-------- 14 files changed, 146 insertions(+), 146 deletions(-) diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 3d62e456950b..960ae1a7bcd0 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -114,7 +114,7 @@ bfa_fcs_port_sm_uninit(struct bfa_fcs_port_s *port, break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } @@ -136,7 +136,7 @@ bfa_fcs_port_sm_init(struct bfa_fcs_port_s *port, enum bfa_fcs_port_event event) break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } @@ -176,7 +176,7 @@ bfa_fcs_port_sm_online(struct bfa_fcs_port_s *port, break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } @@ -214,7 +214,7 @@ bfa_fcs_port_sm_offline(struct bfa_fcs_port_s *port, break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } @@ -234,7 +234,7 @@ bfa_fcs_port_sm_deleting(struct bfa_fcs_port_s *port, break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c index f81d359b7089..5b107abe46e5 100644 --- a/drivers/scsi/bfa/bfa_ioim.c +++ b/drivers/scsi/bfa/bfa_ioim.c @@ -149,7 +149,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(ioim->bfa, event); } } @@ -194,7 +194,7 @@ bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(ioim->bfa, event); } } @@ -259,7 +259,7 @@ bfa_ioim_sm_active(struct bfa_ioim_s 
*ioim, enum bfa_ioim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(ioim->bfa, event); } } @@ -317,7 +317,7 @@ bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(ioim->bfa, event); } } @@ -377,7 +377,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(ioim->bfa, event); } } @@ -419,7 +419,7 @@ bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(ioim->bfa, event); } } @@ -467,7 +467,7 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(ioim->bfa, event); } } @@ -516,7 +516,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(ioim->bfa, event); } } @@ -544,7 +544,7 @@ bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(ioim->bfa, event); } } @@ -577,7 +577,7 @@ bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(ioim->bfa, event); } } @@ -605,7 +605,7 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(ioim->bfa, event); } } diff --git a/drivers/scsi/bfa/bfa_itnim.c b/drivers/scsi/bfa/bfa_itnim.c index eabf7d38bd09..a914ff255135 100644 --- a/drivers/scsi/bfa/bfa_itnim.c +++ b/drivers/scsi/bfa/bfa_itnim.c @@ -144,7 +144,7 @@ bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(itnim->bfa, event); } } @@ -175,7 +175,7 @@ bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(itnim->bfa, event); } } @@ -212,7 +212,7 @@ bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(itnim->bfa, event); } } @@ -247,7 +247,7 @@ bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, break; default: - bfa_assert(0); + bfa_sm_fault(itnim->bfa, event); } } @@ -275,7 +275,7 @@ bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, break; default: - bfa_assert(0); + bfa_sm_fault(itnim->bfa, event); } } @@ -317,7 +317,7 @@ bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(itnim->bfa, event); } } @@ -348,7 +348,7 @@ bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(itnim->bfa, event); } } @@ -385,7 +385,7 @@ bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, break; default: - bfa_assert(0); + bfa_sm_fault(itnim->bfa, event); } } @@ -413,7 +413,7 @@ bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, break; default: - bfa_assert(0); + bfa_sm_fault(itnim->bfa, event); } } @@ -442,7 +442,7 @@ bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(itnim->bfa, event); } } @@ -470,7 +470,7 @@ bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, break; default: - bfa_assert(0); + bfa_sm_fault(itnim->bfa, event); } } @@ -502,7 +502,7 @@ bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(itnim->bfa, event); } } @@ -538,7 +538,7 @@ bfa_itnim_sm_iocdisable(struct 
bfa_itnim_s *itnim, break; default: - bfa_assert(0); + bfa_sm_fault(itnim->bfa, event); } } @@ -559,7 +559,7 @@ bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(itnim->bfa, event); } } @@ -583,7 +583,7 @@ bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, break; default: - bfa_assert(0); + bfa_sm_fault(itnim->bfa, event); } } diff --git a/drivers/scsi/bfa/bfa_lps.c b/drivers/scsi/bfa/bfa_lps.c index 66b9b15f4294..4c98bdab3119 100644 --- a/drivers/scsi/bfa/bfa_lps.c +++ b/drivers/scsi/bfa/bfa_lps.c @@ -121,7 +121,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event) break; default: - bfa_assert(0); + bfa_sm_fault(lps->bfa, event); } } @@ -148,7 +148,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) break; default: - bfa_assert(0); + bfa_sm_fault(lps->bfa, event); } } @@ -180,7 +180,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event) break; default: - bfa_assert(0); + bfa_sm_fault(lps->bfa, event); } } @@ -219,7 +219,7 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event) break; default: - bfa_assert(0); + bfa_sm_fault(lps->bfa, event); } } @@ -243,7 +243,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event) break; default: - bfa_assert(0); + bfa_sm_fault(lps->bfa, event); } } @@ -268,7 +268,7 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event) break; default: - bfa_assert(0); + bfa_sm_fault(lps->bfa, event); } } diff --git a/drivers/scsi/bfa/bfa_rport.c b/drivers/scsi/bfa/bfa_rport.c index 3e1990a74258..7c509fa244e4 100644 --- a/drivers/scsi/bfa/bfa_rport.c +++ b/drivers/scsi/bfa/bfa_rport.c @@ -114,7 +114,7 @@ bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event) default: bfa_stats(rp, sm_un_unexp); - bfa_assert(0); + bfa_sm_fault(rp->bfa, event); } } @@ -146,7 +146,7 @@ bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event) default: bfa_stats(rp, sm_cr_unexp); - bfa_assert(0); + bfa_sm_fault(rp->bfa, event); } } @@ -183,7 +183,7 @@ bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event) default: bfa_stats(rp, sm_fwc_unexp); - bfa_assert(0); + bfa_sm_fault(rp->bfa, event); } } @@ -224,7 +224,7 @@ bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) default: bfa_stats(rp, sm_fwc_unexp); - bfa_assert(0); + bfa_sm_fault(rp->bfa, event); } } @@ -296,7 +296,7 @@ bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event) default: bfa_stats(rp, sm_on_unexp); - bfa_assert(0); + bfa_sm_fault(rp->bfa, event); } } @@ -329,7 +329,7 @@ bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event) default: bfa_stats(rp, sm_fwd_unexp); - bfa_assert(0); + bfa_sm_fault(rp->bfa, event); } } @@ -359,7 +359,7 @@ bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) default: bfa_stats(rp, sm_fwd_unexp); - bfa_assert(0); + bfa_sm_fault(rp->bfa, event); } } @@ -394,7 +394,7 @@ bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event) default: bfa_stats(rp, sm_off_unexp); - bfa_assert(0); + bfa_sm_fault(rp->bfa, event); } } @@ -421,7 +421,7 @@ bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event) break; default: - bfa_assert(0); + bfa_sm_fault(rp->bfa, event); } } @@ -446,7 +446,7 @@ bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) break; default: - bfa_assert(0); + bfa_sm_fault(rp->bfa, event); } } @@ -477,7 +477,7 @@ 
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp, default: bfa_stats(rp, sm_delp_unexp); - bfa_assert(0); + bfa_sm_fault(rp->bfa, event); } } @@ -512,7 +512,7 @@ bfa_rport_sm_offline_pending(struct bfa_rport_s *rp, default: bfa_stats(rp, sm_offp_unexp); - bfa_assert(0); + bfa_sm_fault(rp->bfa, event); } } @@ -550,7 +550,7 @@ bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event) default: bfa_stats(rp, sm_iocd_unexp); - bfa_assert(0); + bfa_sm_fault(rp->bfa, event); } } diff --git a/drivers/scsi/bfa/bfa_tskim.c b/drivers/scsi/bfa/bfa_tskim.c index ff7a4dc0bf3c..ad9aaaedd3f1 100644 --- a/drivers/scsi/bfa/bfa_tskim.c +++ b/drivers/scsi/bfa/bfa_tskim.c @@ -110,7 +110,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(tskim->bfa, event); } } @@ -146,7 +146,7 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(tskim->bfa, event); } } @@ -178,7 +178,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(tskim->bfa, event); } } @@ -207,7 +207,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(tskim->bfa, event); } } @@ -242,7 +242,7 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(tskim->bfa, event); } } @@ -277,7 +277,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, break; default: - bfa_assert(0); + bfa_sm_fault(tskim->bfa, event); } } @@ -303,7 +303,7 @@ bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) break; default: - bfa_assert(0); + bfa_sm_fault(tskim->bfa, event); } } diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/fcpim.c index 06f8a46d1977..71d23d19bc9d 100644 --- a/drivers/scsi/bfa/fcpim.c +++ b/drivers/scsi/bfa/fcpim.c @@ -126,7 +126,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, break; default: - bfa_assert(0); + bfa_sm_fault(itnim->fcs, event); } } @@ -161,7 +161,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, break; default: - bfa_assert(0); + bfa_sm_fault(itnim->fcs, event); } } @@ -205,7 +205,7 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, break; default: - bfa_assert(0); + bfa_sm_fault(itnim->fcs, event); } } @@ -240,7 +240,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, break; default: - bfa_assert(0); + bfa_sm_fault(itnim->fcs, event); } } @@ -270,7 +270,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, break; default: - bfa_assert(0); + bfa_sm_fault(itnim->fcs, event); } } @@ -298,7 +298,7 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, break; default: - bfa_assert(0); + bfa_sm_fault(itnim->fcs, event); } } @@ -321,7 +321,7 @@ bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, break; default: - bfa_assert(0); + bfa_sm_fault(itnim->fcs, event); } } @@ -354,7 +354,7 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, break; default: - bfa_assert(0); + bfa_sm_fault(itnim->fcs, event); } } diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c index d76d9220b6e6..e8120868701b 100644 --- a/drivers/scsi/bfa/fdmi.c +++ b/drivers/scsi/bfa/fdmi.c @@ -158,7 +158,7 @@ bfa_fcs_port_fdmi_sm_offline(struct bfa_fcs_port_fdmi_s *fdmi, break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } @@ -183,7 +183,7 @@ 
bfa_fcs_port_fdmi_sm_sending_rhba(struct bfa_fcs_port_fdmi_s *fdmi, break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } @@ -230,7 +230,7 @@ bfa_fcs_port_fdmi_sm_rhba(struct bfa_fcs_port_fdmi_s *fdmi, break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } @@ -258,7 +258,7 @@ bfa_fcs_port_fdmi_sm_rhba_retry(struct bfa_fcs_port_fdmi_s *fdmi, break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } @@ -286,7 +286,7 @@ bfa_fcs_port_fdmi_sm_sending_rprt(struct bfa_fcs_port_fdmi_s *fdmi, break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } @@ -331,7 +331,7 @@ bfa_fcs_port_fdmi_sm_rprt(struct bfa_fcs_port_fdmi_s *fdmi, break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } @@ -359,7 +359,7 @@ bfa_fcs_port_fdmi_sm_rprt_retry(struct bfa_fcs_port_fdmi_s *fdmi, break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } @@ -387,7 +387,7 @@ bfa_fcs_port_fdmi_sm_sending_rpa(struct bfa_fcs_port_fdmi_s *fdmi, break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } @@ -431,7 +431,7 @@ bfa_fcs_port_fdmi_sm_rpa(struct bfa_fcs_port_fdmi_s *fdmi, break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } @@ -459,7 +459,7 @@ bfa_fcs_port_fdmi_sm_rpa_retry(struct bfa_fcs_port_fdmi_s *fdmi, break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } @@ -478,7 +478,7 @@ bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi, break; default: - bfa_assert(0); + bfa_sm_fault(port->fcs, event); } } diff --git a/drivers/scsi/bfa/ms.c b/drivers/scsi/bfa/ms.c index c96b3ca007ae..f0275a409fd2 100644 --- a/drivers/scsi/bfa/ms.c +++ b/drivers/scsi/bfa/ms.c @@ -118,7 +118,7 @@ bfa_fcs_port_ms_sm_offline(struct bfa_fcs_port_ms_s *ms, break; default: - bfa_assert(0); + bfa_sm_fault(ms->port->fcs, event); } } @@ -141,7 +141,7 @@ bfa_fcs_port_ms_sm_plogi_sending(struct bfa_fcs_port_ms_s *ms, break; default: - bfa_assert(0); + bfa_sm_fault(ms->port->fcs, event); } } @@ -190,7 +190,7 @@ bfa_fcs_port_ms_sm_plogi(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event) break; default: - bfa_assert(0); + bfa_sm_fault(ms->port->fcs, event); } } @@ -216,7 +216,7 @@ bfa_fcs_port_ms_sm_plogi_retry(struct bfa_fcs_port_ms_s *ms, break; default: - bfa_assert(0); + bfa_sm_fault(ms->port->fcs, event); } } @@ -243,7 +243,7 @@ bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms, break; default: - bfa_assert(0); + bfa_sm_fault(ms->port->fcs, event); } } @@ -266,7 +266,7 @@ bfa_fcs_port_ms_sm_gmal_sending(struct bfa_fcs_port_ms_s *ms, break; default: - bfa_assert(0); + bfa_sm_fault(ms->port->fcs, event); } } @@ -304,7 +304,7 @@ bfa_fcs_port_ms_sm_gmal(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event) break; default: - bfa_assert(0); + bfa_sm_fault(ms->port->fcs, event); } } @@ -330,7 +330,7 @@ bfa_fcs_port_ms_sm_gmal_retry(struct bfa_fcs_port_ms_s *ms, break; default: - bfa_assert(0); + bfa_sm_fault(ms->port->fcs, event); } } @@ -466,7 +466,7 @@ bfa_fcs_port_ms_sm_gfn_sending(struct bfa_fcs_port_ms_s *ms, break; default: - bfa_assert(0); + bfa_sm_fault(ms->port->fcs, event); } } @@ -502,7 +502,7 @@ bfa_fcs_port_ms_sm_gfn(struct bfa_fcs_port_ms_s *ms, enum port_ms_event event) break; default: - bfa_assert(0); + bfa_sm_fault(ms->port->fcs, event); } } @@ -528,7 +528,7 @@ bfa_fcs_port_ms_sm_gfn_retry(struct bfa_fcs_port_ms_s *ms, break; default: - bfa_assert(0); + bfa_sm_fault(ms->port->fcs, event); } } diff --git a/drivers/scsi/bfa/ns.c b/drivers/scsi/bfa/ns.c index 2f8b880060bb..6de06a557e2d 100644 --- 
a/drivers/scsi/bfa/ns.c +++ b/drivers/scsi/bfa/ns.c @@ -164,7 +164,7 @@ bfa_fcs_port_ns_sm_offline(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } @@ -187,7 +187,7 @@ bfa_fcs_port_ns_sm_plogi_sending(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } @@ -221,7 +221,7 @@ bfa_fcs_port_ns_sm_plogi(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } @@ -247,7 +247,7 @@ bfa_fcs_port_ns_sm_plogi_retry(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } @@ -270,7 +270,7 @@ bfa_fcs_port_ns_sm_sending_rspn_id(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } @@ -304,7 +304,7 @@ bfa_fcs_port_ns_sm_rspn_id(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } @@ -330,7 +330,7 @@ bfa_fcs_port_ns_sm_rspn_id_retry(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } @@ -353,7 +353,7 @@ bfa_fcs_port_ns_sm_sending_rft_id(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } @@ -390,7 +390,7 @@ bfa_fcs_port_ns_sm_rft_id(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } @@ -413,7 +413,7 @@ bfa_fcs_port_ns_sm_rft_id_retry(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } @@ -436,7 +436,7 @@ bfa_fcs_port_ns_sm_sending_rff_id(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } @@ -494,7 +494,7 @@ bfa_fcs_port_ns_sm_rff_id(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } @@ -517,7 +517,7 @@ bfa_fcs_port_ns_sm_rff_id_retry(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } static void @@ -539,7 +539,7 @@ bfa_fcs_port_ns_sm_sending_gid_ft(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } @@ -575,7 +575,7 @@ bfa_fcs_port_ns_sm_gid_ft(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } @@ -598,7 +598,7 @@ bfa_fcs_port_ns_sm_gid_ft_retry(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } @@ -626,7 +626,7 @@ bfa_fcs_port_ns_sm_online(struct bfa_fcs_port_ns_s *ns, break; default: - bfa_assert(0); + bfa_sm_fault(ns->port->fcs, event); } } diff --git a/drivers/scsi/bfa/rport.c b/drivers/scsi/bfa/rport.c index df714dcdf031..32cf180ec791 100644 --- a/drivers/scsi/bfa/rport.c +++ b/drivers/scsi/bfa/rport.c @@ -224,7 +224,7 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event) break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -276,7 +276,7 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -332,7 +332,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -406,7 +406,7 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -481,7 +481,7 @@ bfa_fcs_rport_sm_plogi(struct 
bfa_fcs_rport_s *rport, enum rport_event event) break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -534,7 +534,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -589,7 +589,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -646,7 +646,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -704,7 +704,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -754,7 +754,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -816,7 +816,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -846,7 +846,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -869,7 +869,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -905,7 +905,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -951,7 +951,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -1011,7 +1011,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -1038,7 +1038,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -1073,7 +1073,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -1132,7 +1132,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -1188,7 +1188,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -1249,7 +1249,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -1334,7 +1334,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } diff --git a/drivers/scsi/bfa/rport_ftrs.c b/drivers/scsi/bfa/rport_ftrs.c index e1932c885ac2..ae7bba67ae2a 100644 --- a/drivers/scsi/bfa/rport_ftrs.c +++ b/drivers/scsi/bfa/rport_ftrs.c @@ -91,7 +91,7 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -114,7 +114,7 @@ bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -160,7 +160,7 @@ bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -186,7 +186,7 @@ bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s 
*rpf, enum rpf_event event) break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -206,7 +206,7 @@ bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } @@ -229,7 +229,7 @@ bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) break; default: - bfa_assert(0); + bfa_sm_fault(rport->fcs, event); } } /** diff --git a/drivers/scsi/bfa/scn.c b/drivers/scsi/bfa/scn.c index bd4771ff62c8..8fe09ba88a91 100644 --- a/drivers/scsi/bfa/scn.c +++ b/drivers/scsi/bfa/scn.c @@ -90,7 +90,7 @@ bfa_fcs_port_scn_sm_offline(struct bfa_fcs_port_scn_s *scn, break; default: - bfa_assert(0); + bfa_sm_fault(scn->port->fcs, event); } } @@ -109,7 +109,7 @@ bfa_fcs_port_scn_sm_sending_scr(struct bfa_fcs_port_scn_s *scn, break; default: - bfa_assert(0); + bfa_sm_fault(scn->port->fcs, event); } } @@ -137,7 +137,7 @@ bfa_fcs_port_scn_sm_scr(struct bfa_fcs_port_scn_s *scn, break; default: - bfa_assert(0); + bfa_sm_fault(scn->port->fcs, event); } } @@ -157,7 +157,7 @@ bfa_fcs_port_scn_sm_scr_retry(struct bfa_fcs_port_scn_s *scn, break; default: - bfa_assert(0); + bfa_sm_fault(scn->port->fcs, event); } } @@ -171,7 +171,7 @@ bfa_fcs_port_scn_sm_online(struct bfa_fcs_port_scn_s *scn, break; default: - bfa_assert(0); + bfa_sm_fault(scn->port->fcs, event); } } diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c index 13f737139379..14fb7ac2bfbc 100644 --- a/drivers/scsi/bfa/vport.c +++ b/drivers/scsi/bfa/vport.c @@ -122,7 +122,7 @@ bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport, break; default: - bfa_assert(0); + bfa_sm_fault(__vport_fcs(vport), event); } } @@ -165,7 +165,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport, break; default: - bfa_assert(0); + bfa_sm_fault(__vport_fcs(vport), event); } } @@ -202,7 +202,7 @@ bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport, break; default: - bfa_assert(0); + bfa_sm_fault(__vport_fcs(vport), event); } } @@ -249,7 +249,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport, break; default: - bfa_assert(0); + bfa_sm_fault(__vport_fcs(vport), event); } } @@ -283,7 +283,7 @@ bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport, break; default: - bfa_assert(0); + bfa_sm_fault(__vport_fcs(vport), event); } } @@ -310,7 +310,7 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport, break; default: - bfa_assert(0); + bfa_sm_fault(__vport_fcs(vport), event); } } @@ -339,7 +339,7 @@ bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport, break; default: - bfa_assert(0); + bfa_sm_fault(__vport_fcs(vport), event); } } @@ -387,7 +387,7 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport, break; default: - bfa_assert(0); + bfa_sm_fault(__vport_fcs(vport), event); } } @@ -419,7 +419,7 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, break; default: - bfa_assert(0); + bfa_sm_fault(__vport_fcs(vport), event); } } -- cgit v1.2.3 From 72041ed8fc8ed92c11af90949bab7b08f3e34fd3 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:35:16 -0800 Subject: [SCSI] bfa: RPORT state machine: direct attach mode fix. Make sure that in direct attach mode, we do not query the name server after a target is marked offline. 
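Editorial note: the hunk below changes the rport state machine's recovery path after an HCB offline or address change. If the local port is still online, the next step now depends on whether the fabric is switched: in a switched fabric the remote port's address may have changed, so the driver re-queries the name server (GID_PN) before logging in again; on a direct-attach link there is no name server, so the fix retries PLOGI immediately. The standalone sketch below only models that branch for clarity; the enum values, the rport_offline_next() helper, and the boolean flags are illustrative stand-ins, not the driver's bfa_fcs types or APIs.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins for the driver's possible next states. */
enum rpsm_next_state {
	NEXT_NSDISC_SENDING,	/* switched fabric: re-query the name server (GID_PN) */
	NEXT_PLOGI_SENDING,	/* direct attach: retry PLOGI directly */
	NEXT_OFFLINE		/* local port down: park the rport offline */
};

static enum rpsm_next_state
rport_offline_next(bool port_online, bool fabric_is_switched)
{
	if (!port_online)
		return NEXT_OFFLINE;
	return fabric_is_switched ? NEXT_NSDISC_SENDING : NEXT_PLOGI_SENDING;
}

int main(void)
{
	/* Direct attach with the port still online: expect PLOGI, not GID_PN. */
	printf("direct attach   -> %s\n",
	       rport_offline_next(true, false) == NEXT_PLOGI_SENDING ?
	       "plogi_sending" : "unexpected");
	/* Switched fabric: expect the name-server rediscovery path. */
	printf("switched fabric -> %s\n",
	       rport_offline_next(true, true) == NEXT_NSDISC_SENDING ?
	       "nsdisc_sending" : "unexpected");
	return 0;
}

In the actual hunk these branches set bfa_fcs_rport_sm_nsdisc_sending or bfa_fcs_rport_sm_plogi_sending and reset the corresponding retry counter before issuing GID_PN or PLOGI.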
Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/rport.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/bfa/rport.c b/drivers/scsi/bfa/rport.c index 32cf180ec791..80592e352226 100644 --- a/drivers/scsi/bfa/rport.c +++ b/drivers/scsi/bfa/rport.c @@ -925,10 +925,17 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_HCB_OFFLINE: case RPSM_EVENT_ADDRESS_CHANGE: if (bfa_fcs_port_is_online(rport->port)) { - bfa_sm_set_state(rport, - bfa_fcs_rport_sm_nsdisc_sending); - rport->ns_retries = 0; - bfa_fcs_rport_send_gidpn(rport, NULL); + if (bfa_fcs_fabric_is_switched(rport->port->fabric)) { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_nsdisc_sending); + rport->ns_retries = 0; + bfa_fcs_rport_send_gidpn(rport, NULL); + } else { + bfa_sm_set_state(rport, + bfa_fcs_rport_sm_plogi_sending); + rport->plogi_retries = 0; + bfa_fcs_rport_send_plogi(rport, NULL); + } } else { rport->pid = 0; bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline); -- cgit v1.2.3 From 86e32dabbad0d860b2be3c30a33c10a134d4ccf1 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:35:33 -0800 Subject: [SCSI] bfa: Fix to copy fpma MAC when requested by user space application. Copy fpma MAC when requested by user space application. Added FPMA mac address to the lport attributes structure. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_fcs_lport.c | 7 ++++++- drivers/scsi/bfa/bfa_lps.c | 12 ++++++++++-- drivers/scsi/bfa/include/bfa_svc.h | 1 + drivers/scsi/bfa/include/defs/bfa_defs_port.h | 4 +++- 4 files changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 960ae1a7bcd0..7bb182dcbd7d 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -936,8 +936,13 @@ bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port, bfa_fcs_port_get_fabric_ipaddr(port), BFA_FCS_FABRIC_IPADDR_SZ); - if (port->vport != NULL) + if (port->vport != NULL) { port_attr->port_type = BFA_PPORT_TYPE_VPORT; + port_attr->fpma_mac = + bfa_lps_get_lp_mac(port->vport->lps); + } else + port_attr->fpma_mac = + bfa_lps_get_lp_mac(port->fabric->lps); } else { port_attr->port_type = BFA_PPORT_TYPE_UNKNOWN; diff --git a/drivers/scsi/bfa/bfa_lps.c b/drivers/scsi/bfa/bfa_lps.c index 4c98bdab3119..730616f6e671 100644 --- a/drivers/scsi/bfa/bfa_lps.c +++ b/drivers/scsi/bfa/bfa_lps.c @@ -613,9 +613,9 @@ bfa_lps_get_max_vport(struct bfa_s *bfa) bfa_get_attr(bfa, &ioc_attr); if (ioc_attr.pci_attr.device_id == BFA_PCI_DEVICE_ID_CT) - return (BFA_LPS_MAX_VPORTS_SUPP_CT); + return BFA_LPS_MAX_VPORTS_SUPP_CT; else - return (BFA_LPS_MAX_VPORTS_SUPP_CB); + return BFA_LPS_MAX_VPORTS_SUPP_CB; } /** @@ -837,6 +837,14 @@ bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps) return lps->lsrjt_expl; } +/** + * Return fpma/spma MAC for lport + */ +struct mac_s +bfa_lps_get_lp_mac(struct bfa_lps_s *lps) +{ + return lps->lp_mac; +} /** * LPS firmware message class handler. 
diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h index 0d7ed4d963a7..71ffb75a71ca 100644 --- a/drivers/scsi/bfa/include/bfa_svc.h +++ b/drivers/scsi/bfa/include/bfa_svc.h @@ -316,6 +316,7 @@ wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps); wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps); u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps); u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps); +mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps); void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status); void bfa_cb_lps_flogo_comp(void *bfad, void *uarg); void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status); diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_port.h b/drivers/scsi/bfa/include/defs/bfa_defs_port.h index de0696c81bc4..1c74a8b94aad 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_port.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_port.h @@ -185,6 +185,8 @@ struct bfa_port_attr_s { wwn_t fabric_name; /* attached switch's nwwn */ u8 fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /* attached * fabric's ip addr */ + struct mac_s fpma_mac; /* Lport's FPMA Mac address */ + u16 authfail; /* auth failed state */ }; /** @@ -235,7 +237,7 @@ struct bfa_port_aen_data_s { enum bfa_ioc_type_e ioc_type; wwn_t pwwn; /* WWN of the physical port */ wwn_t fwwn; /* WWN of the fabric port */ - mac_t mac; /* MAC addres of the ethernet port, + mac_t mac; /* MAC address of the ethernet port, * applicable to CNA port only */ int phy_port_num; /*! For SFP related events */ enum bfa_port_aen_sfp_pom level; /* Only transitions will -- cgit v1.2.3 From 7af074dc9d343f69bab4bfd699e6d7ba09915fd9 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:35:45 -0800 Subject: [SCSI] bfa: PCI VPD, FIP and include file changes. Changed PCI VPD to incorporate specific OEM vendors. Added FCoE specific interrupt latency and delay params. Added some variables needed by FIP 2.0. Added some new logging and tracing definitions. Added reserved members to make the structures (IOC, IOCFC) 64bit aligned. Changed the module identifiers, as some files were moved. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_ioc.c | 4 +- drivers/scsi/bfa/bfa_trcmod_priv.h | 62 +++++++------ drivers/scsi/bfa/include/defs/bfa_defs_cee.h | 14 ++- drivers/scsi/bfa/include/defs/bfa_defs_ioc.h | 1 + drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h | 11 ++- drivers/scsi/bfa/include/defs/bfa_defs_mfg.h | 111 +++++++++++++++++++++--- drivers/scsi/bfa/include/defs/bfa_defs_status.h | 12 ++- drivers/scsi/bfa/include/log/bfa_log_hal.h | 6 ++ drivers/scsi/bfa/include/log/bfa_log_linux.h | 16 ++++ 9 files changed, 178 insertions(+), 59 deletions(-) diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index a5f9745315b1..8a6361cf2439 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include @@ -27,7 +27,7 @@ #include #include -BFA_TRC_FILE(HAL, IOC); +BFA_TRC_FILE(CNA, IOC); /** * IOC local definitions diff --git a/drivers/scsi/bfa/bfa_trcmod_priv.h b/drivers/scsi/bfa/bfa_trcmod_priv.h index b3562dce7e9f..3d947d47b837 100644 --- a/drivers/scsi/bfa/bfa_trcmod_priv.h +++ b/drivers/scsi/bfa/bfa_trcmod_priv.h @@ -29,38 +29,36 @@ * !!! 
needed between trace utility and driver version */ enum { - BFA_TRC_HAL_IOC = 1, - BFA_TRC_HAL_INTR = 2, - BFA_TRC_HAL_FCXP = 3, - BFA_TRC_HAL_UF = 4, - BFA_TRC_HAL_DIAG = 5, - BFA_TRC_HAL_RPORT = 6, - BFA_TRC_HAL_FCPIM = 7, - BFA_TRC_HAL_IOIM = 8, - BFA_TRC_HAL_TSKIM = 9, - BFA_TRC_HAL_ITNIM = 10, - BFA_TRC_HAL_PPORT = 11, - BFA_TRC_HAL_SGPG = 12, - BFA_TRC_HAL_FLASH = 13, - BFA_TRC_HAL_DEBUG = 14, - BFA_TRC_HAL_WWN = 15, - BFA_TRC_HAL_FLASH_RAW = 16, - BFA_TRC_HAL_SBOOT = 17, - BFA_TRC_HAL_SBOOT_IO = 18, - BFA_TRC_HAL_SBOOT_INTR = 19, - BFA_TRC_HAL_SBTEST = 20, - BFA_TRC_HAL_IPFC = 21, - BFA_TRC_HAL_IOCFC = 22, - BFA_TRC_HAL_FCPTM = 23, - BFA_TRC_HAL_IOTM = 24, - BFA_TRC_HAL_TSKTM = 25, - BFA_TRC_HAL_TIN = 26, - BFA_TRC_HAL_LPS = 27, - BFA_TRC_HAL_FCDIAG = 28, - BFA_TRC_HAL_PBIND = 29, - BFA_TRC_HAL_IOCFC_CT = 30, - BFA_TRC_HAL_IOCFC_CB = 31, - BFA_TRC_HAL_IOCFC_Q = 32, + BFA_TRC_HAL_INTR = 1, + BFA_TRC_HAL_FCXP = 2, + BFA_TRC_HAL_UF = 3, + BFA_TRC_HAL_RPORT = 4, + BFA_TRC_HAL_FCPIM = 5, + BFA_TRC_HAL_IOIM = 6, + BFA_TRC_HAL_TSKIM = 7, + BFA_TRC_HAL_ITNIM = 8, + BFA_TRC_HAL_PPORT = 9, + BFA_TRC_HAL_SGPG = 10, + BFA_TRC_HAL_FLASH = 11, + BFA_TRC_HAL_DEBUG = 12, + BFA_TRC_HAL_WWN = 13, + BFA_TRC_HAL_FLASH_RAW = 14, + BFA_TRC_HAL_SBOOT = 15, + BFA_TRC_HAL_SBOOT_IO = 16, + BFA_TRC_HAL_SBOOT_INTR = 17, + BFA_TRC_HAL_SBTEST = 18, + BFA_TRC_HAL_IPFC = 19, + BFA_TRC_HAL_IOCFC = 20, + BFA_TRC_HAL_FCPTM = 21, + BFA_TRC_HAL_IOTM = 22, + BFA_TRC_HAL_TSKTM = 23, + BFA_TRC_HAL_TIN = 24, + BFA_TRC_HAL_LPS = 25, + BFA_TRC_HAL_FCDIAG = 26, + BFA_TRC_HAL_PBIND = 27, + BFA_TRC_HAL_IOCFC_CT = 28, + BFA_TRC_HAL_IOCFC_CB = 29, + BFA_TRC_HAL_IOCFC_Q = 30, }; #endif /* __BFA_TRCMOD_PRIV_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h index 520a22f52dd1..b0ac9ac15c5d 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h @@ -28,10 +28,6 @@ #define BFA_CEE_LLDP_MAX_STRING_LEN (128) - -/* FIXME: this is coming from the protocol spec. Can the host & apps share the - protocol .h files ? 
- */ #define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001 #define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002 #define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004 @@ -94,9 +90,10 @@ struct bfa_cee_dcbx_cfg_s { /* CEE status */ /* Making this to tri-state for the benefit of port list command */ enum bfa_cee_status_e { - CEE_PHY_DOWN = 0, - CEE_PHY_UP = 1, - CEE_UP = 2, + CEE_UP = 0, + CEE_PHY_UP = 1, + CEE_LOOPBACK = 2, + CEE_PHY_DOWN = 3, }; /* CEE Query */ @@ -107,7 +104,8 @@ struct bfa_cee_attr_s { struct bfa_cee_dcbx_cfg_s dcbx_remote; mac_t src_mac; u8 link_speed; - u8 filler[3]; + u8 nw_priority; + u8 filler[2]; }; diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h index b1d532da3a9d..6c721b13aca4 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h @@ -126,6 +126,7 @@ struct bfa_ioc_attr_s { struct bfa_ioc_driver_attr_s driver_attr; /* driver attr */ struct bfa_ioc_pci_attr_s pci_attr; u8 port_id; /* port number */ + u8 rsvd[7]; /*!< 64bit align */ }; /** diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h index d76bcbd9820f..87f0401c6439 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h @@ -26,6 +26,8 @@ #define BFA_IOCFC_INTR_DELAY 1125 #define BFA_IOCFC_INTR_LATENCY 225 +#define BFA_IOCFCOE_INTR_DELAY 25 +#define BFA_IOCFCOE_INTR_LATENCY 5 /** * Interrupt coalescing configuration. @@ -50,7 +52,7 @@ struct bfa_iocfc_fwcfg_s { u16 num_fcxp_reqs; /* unassisted FC exchanges */ u16 num_uf_bufs; /* unsolicited recv buffers */ u8 num_cqs; - u8 rsvd; + u8 rsvd[5]; }; struct bfa_iocfc_drvcfg_s { @@ -224,6 +226,11 @@ struct bfa_fw_port_physm_stats_s { struct bfa_fw_fip_stats_s { + u32 vlan_req; /* vlan discovery requests */ + u32 vlan_notify; /* vlan notifications */ + u32 vlan_err; /* vlan response error */ + u32 vlan_timeouts; /* vlan disvoery timeouts */ + u32 vlan_invalids; /* invalid vlan in discovery advert. */ u32 disc_req; /* Discovery solicit requests */ u32 disc_rsp; /* Discovery solicit response */ u32 disc_err; /* Discovery advt. 
parse errors */ @@ -235,7 +242,7 @@ struct bfa_fw_fip_stats_s { u32 clrvlink_req; /* Clear virtual link req */ u32 op_unsupp; /* Unsupported FIP operation */ u32 untagged; /* Untagged frames (ignored) */ - u32 rsvd; + u32 invalid_version; /*!< Invalid FIP version */ }; diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h index 13fd4ab6aae2..c5bd9c36ad4d 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h @@ -22,7 +22,47 @@ /** * Manufacturing block version */ -#define BFA_MFG_VERSION 1 +#define BFA_MFG_VERSION 2 + +/** + * Manufacturing block encrypted version + */ +#define BFA_MFG_ENC_VER 2 + +/** + * Manufacturing block version 1 length + */ +#define BFA_MFG_VER1_LEN 128 + +/** + * Manufacturing block header length + */ +#define BFA_MFG_HDR_LEN 4 + +/** + * Checksum size + */ +#define BFA_MFG_CHKSUM_SIZE 16 + +/** + * Manufacturing block encrypted version + */ +#define BFA_MFG_ENC_VER 2 + +/** + * Manufacturing block version 1 length + */ +#define BFA_MFG_VER1_LEN 128 + +/** + * Manufacturing block header length + */ +#define BFA_MFG_HDR_LEN 4 + +/** + * Checksum size + */ +#define BFA_MFG_CHKSUM_SIZE 16 /** * Manufacturing block format @@ -30,29 +70,74 @@ #define BFA_MFG_SERIALNUM_SIZE 11 #define BFA_MFG_PARTNUM_SIZE 14 #define BFA_MFG_SUPPLIER_ID_SIZE 10 -#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20 -#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20 -#define BFA_MFG_SUPPLIER_REVISION_SIZE 4 +#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20 +#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20 +#define BFA_MFG_SUPPLIER_REVISION_SIZE 4 #define STRSZ(_n) (((_n) + 4) & ~3) +/** + * Manufacturing card type + */ +enum { + BFA_MFG_TYPE_CB_MAX = 825, /* Crossbow card type max */ + BFA_MFG_TYPE_FC8P2 = 825, /* 8G 2port FC card */ + BFA_MFG_TYPE_FC8P1 = 815, /* 8G 1port FC card */ + BFA_MFG_TYPE_FC4P2 = 425, /* 4G 2port FC card */ + BFA_MFG_TYPE_FC4P1 = 415, /* 4G 1port FC card */ + BFA_MFG_TYPE_CNA10P2 = 1020, /* 10G 2port CNA card */ + BFA_MFG_TYPE_CNA10P1 = 1010, /* 10G 1port CNA card */ +}; + +#pragma pack(1) + +/** + * Card type to port number conversion + */ +#define bfa_mfg_type2port_num(card_type) (((card_type) / 10) % 10) + + +/** + * All numerical fields are in big-endian format. + */ +struct bfa_mfg_block_s { +}; + /** * VPD data length */ -#define BFA_MFG_VPD_LEN 256 +#define BFA_MFG_VPD_LEN 512 + +#define BFA_MFG_VPD_PCI_HDR_OFF 137 +#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /* version mask 3 bits */ +#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /* vendor mask 5 bits */ + +/** + * VPD vendor tag + */ +enum { + BFA_MFG_VPD_UNKNOWN = 0, /* vendor unknown */ + BFA_MFG_VPD_IBM = 1, /* vendor IBM */ + BFA_MFG_VPD_HP = 2, /* vendor HP */ + BFA_MFG_VPD_DELL = 3, /* vendor DELL */ + BFA_MFG_VPD_PCI_IBM = 0x08, /* PCI VPD IBM */ + BFA_MFG_VPD_PCI_HP = 0x10, /* PCI VPD HP */ + BFA_MFG_VPD_PCI_DELL = 0x20, /* PCI VPD DELL */ + BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */ +}; /** * All numerical fields are in big-endian format. 
*/ struct bfa_mfg_vpd_s { - u8 version; /* vpd data version */ - u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */ - u8 chksum; /* u8 checksum */ - u8 vendor; /* vendor */ - u8 len; /* vpd data length excluding header */ - u8 rsv; - u8 data[BFA_MFG_VPD_LEN]; /* vpd data */ + u8 version; /* vpd data version */ + u8 vpd_sig[3]; /* characters 'V', 'P', 'D' */ + u8 chksum; /* u8 checksum */ + u8 vendor; /* vendor */ + u8 len; /* vpd data length excluding header */ + u8 rsv; + u8 data[BFA_MFG_VPD_LEN]; /* vpd data */ }; -#pragma pack(1) +#pragma pack() #endif /* __BFA_DEFS_MFG_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_status.h b/drivers/scsi/bfa/include/defs/bfa_defs_status.h index cdceaeb9f4b8..d8a74ebfe1ae 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_status.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_status.h @@ -180,8 +180,8 @@ enum bfa_status { BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114, /* Given adapter is part * of another team */ BFA_STATUS_IM_ADAPT_HAS_VLANS = 115, /* Adapter has VLANs configured. - * Delete all VLANs before - * creating team */ + * Delete all VLANs to become + * part of the team */ BFA_STATUS_IM_PVID_MISMATCH = 116, /* Mismatching PVIDs configured * for adapters */ BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117, /* Mismatching link speeds @@ -242,6 +242,14 @@ enum bfa_status { * failed */ BFA_STATUS_IM_UNBIND_FAILED = 149, /* ! < IM Driver unbind operation * failed */ + BFA_STATUS_IM_PORT_IN_TEAM = 150, /* Port is already part of the + * team */ + BFA_STATUS_IM_VLAN_NOT_FOUND = 151, /* VLAN ID doesn't exists */ + BFA_STATUS_IM_TEAM_NOT_FOUND = 152, /* Teaming configuration doesn't + * exists */ + BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153, /* Given settings are not + * allowed for the current + * Teaming mode */ BFA_STATUS_MAX_VAL /* Unknown error code */ }; #define bfa_status_t enum bfa_status diff --git a/drivers/scsi/bfa/include/log/bfa_log_hal.h b/drivers/scsi/bfa/include/log/bfa_log_hal.h index 0412aea2ec30..5f8f5e30b9e8 100644 --- a/drivers/scsi/bfa/include/log/bfa_log_hal.h +++ b/drivers/scsi/bfa/include/log/bfa_log_hal.h @@ -27,4 +27,10 @@ (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 3) #define BFA_LOG_HAL_SM_ASSERT \ (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 4) +#define BFA_LOG_HAL_DRIVER_ERROR \ + (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 5) +#define BFA_LOG_HAL_DRIVER_CONFIG_ERROR \ + (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 6) +#define BFA_LOG_HAL_MBOX_ERROR \ + (((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 7) #endif diff --git a/drivers/scsi/bfa/include/log/bfa_log_linux.h b/drivers/scsi/bfa/include/log/bfa_log_linux.h index 317c0547ee16..bd451db4c30a 100644 --- a/drivers/scsi/bfa/include/log/bfa_log_linux.h +++ b/drivers/scsi/bfa/include/log/bfa_log_linux.h @@ -41,4 +41,20 @@ (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 10) #define BFA_LOG_LINUX_SCSI_ABORT_COMP \ (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 11) +#define BFA_LOG_LINUX_DRIVER_CONFIG_ERROR \ + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 12) +#define BFA_LOG_LINUX_BNA_STATE_MACHINE \ + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 13) +#define BFA_LOG_LINUX_IOC_ERROR \ + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 14) +#define BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR \ + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 15) +#define BFA_LOG_LINUX_RING_BUFFER_ERROR \ + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 16) +#define BFA_LOG_LINUX_DRIVER_ERROR \ + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 
17) +#define BFA_LOG_LINUX_DRIVER_DIAG \ + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 18) +#define BFA_LOG_LINUX_DRIVER_AEN \ + (((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 19) #endif -- cgit v1.2.3 From f926a05f5c1507aeae0e36175a03c0a19c201187 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:36:00 -0800 Subject: [SCSI] bfa: FCS authentication related changes. Made FCS authentication related changes to state machines and header files. Made changes in FCS state machines to handle the case when secret string is NULL. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_fcs_lport.c | 2 ++ drivers/scsi/bfa/fabric.c | 6 ++++++ drivers/scsi/bfa/fcs_fabric.h | 1 + drivers/scsi/bfa/include/defs/bfa_defs_auth.h | 22 ++++++++++++++++++++++ drivers/scsi/bfa/include/defs/bfa_defs_pport.h | 4 ++-- 5 files changed, 33 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 7bb182dcbd7d..4a51aac7ab04 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -931,6 +931,8 @@ bfa_fcs_port_get_attr(struct bfa_fcs_port_s *port, if (port->fabric) { port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric); port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric); + port_attr->authfail = + bfa_fcs_fabric_is_auth_failed(port->fabric); port_attr->fabric_name = bfa_fcs_port_get_fabric_name(port); memcpy(port_attr->fabric_ip_addr, bfa_fcs_port_get_fabric_ipaddr(port), diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c index 20a686a420a2..b02ed7653eb6 100644 --- a/drivers/scsi/bfa/fabric.c +++ b/drivers/scsi/bfa/fabric.c @@ -895,6 +895,12 @@ bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric) return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback); } +bfa_boolean_t +bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric) +{ + return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed); +} + enum bfa_pport_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric) { diff --git a/drivers/scsi/bfa/fcs_fabric.h b/drivers/scsi/bfa/fcs_fabric.h index 8237bd5e7217..244c3f00c50c 100644 --- a/drivers/scsi/bfa/fcs_fabric.h +++ b/drivers/scsi/bfa/fcs_fabric.h @@ -47,6 +47,7 @@ void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, u16 len); u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric); bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric); +bfa_boolean_t bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric); enum bfa_pport_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric); void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric); void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric); diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h index dd19c83aba58..45df32820911 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h @@ -23,6 +23,7 @@ #define PRIVATE_KEY 19009 #define KEY_LEN 32399 #define BFA_AUTH_SECRET_STRING_LEN 256 +#define BFA_AUTH_FAIL_NO_PASSWORD 0xFE #define BFA_AUTH_FAIL_TIMEOUT 0xFF /** @@ -41,6 +42,27 @@ enum bfa_auth_status { BFA_AUTH_STATUS_UNKNOWN = 9, /* authentication status unknown */ }; +enum bfa_auth_rej_code { + BFA_AUTH_RJT_CODE_AUTH_FAILURE = 1, /* auth failure */ + BFA_AUTH_RJT_CODE_LOGICAL_ERR = 2, /* logical error */ +}; + +/** + * Authentication reject codes + */ 
+enum bfa_auth_rej_code_exp { + BFA_AUTH_MECH_NOT_USABLE = 1, /* auth. mechanism not usable */ + BFA_AUTH_DH_GROUP_NOT_USABLE = 2, /* DH Group not usable */ + BFA_AUTH_HASH_FUNC_NOT_USABLE = 3, /* hash Function not usable */ + BFA_AUTH_AUTH_XACT_STARTED = 4, /* auth xact started */ + BFA_AUTH_AUTH_FAILED = 5, /* auth failed */ + BFA_AUTH_INCORRECT_PLD = 6, /* incorrect payload */ + BFA_AUTH_INCORRECT_PROTO_MSG = 7, /* incorrect proto msg */ + BFA_AUTH_RESTART_AUTH_PROTO = 8, /* restart auth protocol */ + BFA_AUTH_AUTH_CONCAT_NOT_SUPP = 9, /* auth concat not supported */ + BFA_AUTH_PROTO_VER_NOT_SUPP = 10,/* proto version not supported */ +}; + struct auth_proto_stats_s { u32 auth_rjts; u32 auth_negs; diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h index bf320412ee24..88662a15a21b 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h @@ -232,7 +232,7 @@ struct bfa_pport_attr_s { u32 pid; /* port ID */ enum bfa_pport_type port_type; /* current topology */ u32 loopback; /* external loopback */ - u32 rsvd1; + u32 authfail; /* auth fail state */ u32 rsvd2; /* padding for 64 bit */ }; @@ -247,7 +247,7 @@ struct bfa_pport_fc_stats_s { u64 rx_words; /* received words */ u64 lip_count; /* LIPs seen */ u64 nos_count; /* NOS count */ - u64 error_frames; /* errored frames (sent?) */ + u64 error_frames; /* errored frames */ u64 dropped_frames; /* dropped frames */ u64 link_failures; /* link failure count */ u64 loss_of_syncs; /* loss of sync count */ -- cgit v1.2.3 From 738c9e66dcb7e17a962a7d65c976386b970d10ca Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:36:19 -0800 Subject: [SCSI] bfa: Added firmware save clear feature for BFA driver. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_core.c | 9 +++++++++ drivers/scsi/bfa/bfa_ioc.c | 10 +++++++++- drivers/scsi/bfa/bfa_ioc.h | 1 + drivers/scsi/bfa/include/bfa.h | 1 + 4 files changed, 20 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 72e3f2f63b2e..0c08e185a766 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -384,6 +384,15 @@ bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen) return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen); } +/** + * Clear the saved firmware trace information of an IOC. + */ +void +bfa_debug_fwsave_clear(struct bfa_s *bfa) +{ + bfa_ioc_debug_fwsave_clear(&bfa->ioc); +} + /** * Fetch firmware trace data. * diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 8a6361cf2439..0019ff7359db 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -1488,7 +1488,6 @@ return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) { - bfa_assert(ioc->auto_recover); ioc->dbg_fwsave = dbg_fwsave; ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover); } @@ -1924,6 +1923,15 @@ bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) return BFA_STATUS_OK; } +/** + * Clear saved firmware trace + */ +void +bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc) +{ + ioc->dbg_fwsave_once = BFA_TRUE; +} + /** * Retrieve saved firmware trace from a prior IOC failure. 
*/ diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 853cc3136f0e..4b73efad1ee5 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -270,6 +270,7 @@ int bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover); void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave); bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen); +void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc); bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen); u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr); diff --git a/drivers/scsi/bfa/include/bfa.h b/drivers/scsi/bfa/include/bfa.h index 942ae64038c9..17654cae0c75 100644 --- a/drivers/scsi/bfa/include/bfa.h +++ b/drivers/scsi/bfa/include/bfa.h @@ -172,6 +172,7 @@ void bfa_timer_tick(struct bfa_s *bfa); */ bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen); bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen); +void bfa_debug_fwsave_clear(struct bfa_s *bfa); #include "bfa_priv.h" -- cgit v1.2.3 From 9693e7dff5c2911b4e445f5f656ef57b3a5bffac Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:36:30 -0800 Subject: [SCSI] bfa: Introduce a link notification state machine. Introduce a link notification state machine to handle next incoming link events while the current event is being delivered to the driver. When the event has been processed by the driver, the link notification state machine will queue the next event (if there is any) to the driver. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_fcport.c | 232 ++++++++++++++++++++++++++++++++++++--- drivers/scsi/bfa/bfa_port_priv.h | 13 ++- 2 files changed, 230 insertions(+), 15 deletions(-) diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c index aef648b55dfc..4ed048bf45cb 100644 --- a/drivers/scsi/bfa/bfa_fcport.c +++ b/drivers/scsi/bfa/bfa_fcport.c @@ -26,16 +26,6 @@ BFA_TRC_FILE(HAL, PPORT); BFA_MODULE(pport); -#define bfa_pport_callback(__pport, __event) do { \ - if ((__pport)->bfa->fcs) { \ - (__pport)->event_cbfn((__pport)->event_cbarg, (__event)); \ - } else { \ - (__pport)->hcb_event = (__event); \ - bfa_cb_queue((__pport)->bfa, &(__pport)->hcb_qe, \ - __bfa_cb_port_event, (__pport)); \ - } \ -} while (0) - /* * The port is considered disabled if corresponding physical port or IOC are * disabled explicitly @@ -57,7 +47,10 @@ static void __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete); static void bfa_port_stats_timeout(void *cbarg); static void bfa_port_stats_clr_timeout(void *cbarg); - +static void bfa_pport_callback(struct bfa_pport_s *pport, + enum bfa_pport_linkstate event); +static void bfa_pport_queue_cb(struct bfa_pport_ln_s *ln, + enum bfa_pport_linkstate event); /** * bfa_pport_private */ @@ -77,6 +70,16 @@ enum bfa_pport_sm_event { BFA_PPORT_SM_HWFAIL = 9, /* IOC h/w failure */ }; +/** + * BFA port link notification state machine events + */ + +enum bfa_pport_ln_sm_event { + BFA_PPORT_LN_SM_LINKUP = 1, /* linkup event */ + BFA_PPORT_LN_SM_LINKDOWN = 2, /* linkdown event */ + BFA_PPORT_LN_SM_NOTIFICATION = 3 /* done notification */ +}; + static void bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event); static void bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport, @@ -100,6 +103,21 @@ static void bfa_pport_sm_iocdown(struct bfa_pport_s 
*pport, static void bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event); +static void bfa_pport_ln_sm_dn(struct bfa_pport_ln_s *ln, + enum bfa_pport_ln_sm_event event); +static void bfa_pport_ln_sm_dn_nf(struct bfa_pport_ln_s *ln, + enum bfa_pport_ln_sm_event event); +static void bfa_pport_ln_sm_dn_up_nf(struct bfa_pport_ln_s *ln, + enum bfa_pport_ln_sm_event event); +static void bfa_pport_ln_sm_up(struct bfa_pport_ln_s *ln, + enum bfa_pport_ln_sm_event event); +static void bfa_pport_ln_sm_up_nf(struct bfa_pport_ln_s *ln, + enum bfa_pport_ln_sm_event event); +static void bfa_pport_ln_sm_up_dn_nf(struct bfa_pport_ln_s *ln, + enum bfa_pport_ln_sm_event event); +static void bfa_pport_ln_sm_up_dn_up_nf(struct bfa_pport_ln_s *ln, + enum bfa_pport_ln_sm_event event); + static struct bfa_sm_table_s hal_pport_sm_table[] = { {BFA_SM(bfa_pport_sm_uninit), BFA_PPORT_ST_UNINIT}, {BFA_SM(bfa_pport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT}, @@ -619,7 +637,163 @@ bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) } } +/** + * Link state is down + */ +static void +bfa_pport_ln_sm_dn(struct bfa_pport_ln_s *ln, + enum bfa_pport_ln_sm_event event) +{ + bfa_trc(ln->pport->bfa, event); + + switch (event) { + case BFA_PPORT_LN_SM_LINKUP: + bfa_sm_set_state(ln, bfa_pport_ln_sm_up_nf); + bfa_pport_queue_cb(ln, BFA_PPORT_LINKUP); + break; + + default: + bfa_sm_fault(ln->pport->bfa, event); + } +} + +/** + * Link state is waiting for down notification + */ +static void +bfa_pport_ln_sm_dn_nf(struct bfa_pport_ln_s *ln, + enum bfa_pport_ln_sm_event event) +{ + bfa_trc(ln->pport->bfa, event); + + switch (event) { + case BFA_PPORT_LN_SM_LINKUP: + bfa_sm_set_state(ln, bfa_pport_ln_sm_dn_up_nf); + break; + + case BFA_PPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_pport_ln_sm_dn); + break; + + default: + bfa_sm_fault(ln->pport->bfa, event); + } +} + +/** + * Link state is waiting for down notification and there is a pending up + */ +static void +bfa_pport_ln_sm_dn_up_nf(struct bfa_pport_ln_s *ln, + enum bfa_pport_ln_sm_event event) +{ + bfa_trc(ln->pport->bfa, event); + + switch (event) { + case BFA_PPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, bfa_pport_ln_sm_dn_nf); + break; + + case BFA_PPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_pport_ln_sm_up_nf); + bfa_pport_queue_cb(ln, BFA_PPORT_LINKUP); + break; + + default: + bfa_sm_fault(ln->pport->bfa, event); + } +} + +/** + * Link state is up + */ +static void +bfa_pport_ln_sm_up(struct bfa_pport_ln_s *ln, + enum bfa_pport_ln_sm_event event) +{ + bfa_trc(ln->pport->bfa, event); + + switch (event) { + case BFA_PPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, bfa_pport_ln_sm_dn_nf); + bfa_pport_queue_cb(ln, BFA_PPORT_LINKDOWN); + break; + + default: + bfa_sm_fault(ln->pport->bfa, event); + } +} + +/** + * Link state is waiting for up notification + */ +static void +bfa_pport_ln_sm_up_nf(struct bfa_pport_ln_s *ln, + enum bfa_pport_ln_sm_event event) +{ + bfa_trc(ln->pport->bfa, event); + + switch (event) { + case BFA_PPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, bfa_pport_ln_sm_up_dn_nf); + break; + + case BFA_PPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_pport_ln_sm_up); + break; + + default: + bfa_sm_fault(ln->pport->bfa, event); + } +} + +/** + * Link state is waiting for up notification and there is a pending down + */ +static void +bfa_pport_ln_sm_up_dn_nf(struct bfa_pport_ln_s *ln, + enum bfa_pport_ln_sm_event event) +{ + bfa_trc(ln->pport->bfa, event); + switch (event) { + case 
BFA_PPORT_LN_SM_LINKUP: + bfa_sm_set_state(ln, bfa_pport_ln_sm_up_dn_up_nf); + break; + + case BFA_PPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_pport_ln_sm_dn_nf); + bfa_pport_queue_cb(ln, BFA_PPORT_LINKDOWN); + break; + + default: + bfa_sm_fault(ln->pport->bfa, event); + } +} + +/** + * Link state is waiting for up notification and there are pending down and up + */ +static void +bfa_pport_ln_sm_up_dn_up_nf(struct bfa_pport_ln_s *ln, + enum bfa_pport_ln_sm_event event) +{ + bfa_trc(ln->pport->bfa, event); + + switch (event) { + case BFA_PPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, bfa_pport_ln_sm_up_dn_nf); + break; + + case BFA_PPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_pport_ln_sm_dn_up_nf); + bfa_pport_queue_cb(ln, BFA_PPORT_LINKDOWN); + break; + + default: + bfa_sm_fault(ln->pport->bfa, event); + } +} /** * bfa_pport_private @@ -628,10 +802,12 @@ bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) static void __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete) { - struct bfa_pport_s *pport = cbarg; + struct bfa_pport_ln_s *ln = cbarg; if (complete) - pport->event_cbfn(pport->event_cbarg, pport->hcb_event); + ln->pport->event_cbfn(ln->pport->event_cbarg, ln->ln_event); + else + bfa_sm_send_event(ln, BFA_PPORT_LN_SM_NOTIFICATION); } #define PPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), \ @@ -681,13 +857,16 @@ bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, { struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); struct bfa_pport_cfg_s *port_cfg = &pport->cfg; + struct bfa_pport_ln_s *ln = &pport->ln; bfa_os_memset(pport, 0, sizeof(struct bfa_pport_s)); pport->bfa = bfa; + ln->pport = pport; bfa_pport_mem_claim(pport, meminfo); bfa_sm_set_state(pport, bfa_pport_sm_uninit); + bfa_sm_set_state(ln, bfa_pport_ln_sm_dn); /** * initialize and set default configuration @@ -1368,6 +1547,33 @@ bfa_port_stats_clr_timeout(void *cbarg) bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats_clr, port); } +static void +bfa_pport_callback(struct bfa_pport_s *pport, enum bfa_pport_linkstate event) +{ + if (pport->bfa->fcs) { + pport->event_cbfn(pport->event_cbarg, event); + return; + } + + switch (event) { + case BFA_PPORT_LINKUP: + bfa_sm_send_event(&pport->ln, BFA_PPORT_LN_SM_LINKUP); + break; + case BFA_PPORT_LINKDOWN: + bfa_sm_send_event(&pport->ln, BFA_PPORT_LN_SM_LINKDOWN); + break; + default: + bfa_assert(0); + } +} + +static void +bfa_pport_queue_cb(struct bfa_pport_ln_s *ln, enum bfa_pport_linkstate event) +{ + ln->ln_event = event; + bfa_cb_queue(ln->pport->bfa, &ln->ln_qe, __bfa_cb_port_event, ln); +} + static void __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete) { diff --git a/drivers/scsi/bfa/bfa_port_priv.h b/drivers/scsi/bfa/bfa_port_priv.h index 51f698a06b6d..f29701bd2369 100644 --- a/drivers/scsi/bfa/bfa_port_priv.h +++ b/drivers/scsi/bfa/bfa_port_priv.h @@ -22,6 +22,16 @@ #include #include "bfa_intr_priv.h" +/** + * Link notification data structure + */ +struct bfa_pport_ln_s { + struct bfa_pport_s *pport; + bfa_sm_t sm; + struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */ + enum bfa_pport_linkstate ln_event; /* ln event for callback */ +}; + /** * BFA physical port data structure */ @@ -52,9 +62,8 @@ struct bfa_pport_s { union bfi_pport_i2h_msg_u i2hmsg; } event_arg; void *bfad; /* BFA driver handle */ + struct bfa_pport_ln_s ln; /* Link Notification */ struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */ - enum bfa_pport_linkstate hcb_event; - /* link event for 
callback */ u32 msgtag; /* fimrware msg tag for reply */ u8 *stats_kva; u64 stats_pa; -- cgit v1.2.3 From 2993cc71d1bff61999ade7f2b6b3ea2dd1e2c8d9 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:36:47 -0800 Subject: [SCSI] bfa: AEN and byte alignment fixes. Replace enum types with int and rearrange the fields to fix some alignment issue. Local var ioc_attr is causing the stack to overflow, so removed the usage of the local ioc_attr var and now invoking an API to return the ioc_type. Fix some AEN issues. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_fcport.c | 1 + drivers/scsi/bfa/bfa_ioc.c | 32 +++++++++++------ drivers/scsi/bfa/bfad.c | 9 +---- drivers/scsi/bfa/bfad_drv.h | 12 +------ drivers/scsi/bfa/include/aen/bfa_aen.h | 50 ++++++++++++++------------ drivers/scsi/bfa/include/defs/bfa_defs_aen.h | 10 ++++++ drivers/scsi/bfa/include/defs/bfa_defs_ioc.h | 2 +- drivers/scsi/bfa/include/defs/bfa_defs_lport.h | 4 +-- drivers/scsi/bfa/include/defs/bfa_defs_port.h | 17 ++++----- 9 files changed, 73 insertions(+), 64 deletions(-) diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c index 4ed048bf45cb..bdea7f0eb6bd 100644 --- a/drivers/scsi/bfa/bfa_fcport.c +++ b/drivers/scsi/bfa/bfa_fcport.c @@ -142,6 +142,7 @@ bfa_pport_aen_post(struct bfa_pport_s *pport, enum bfa_port_aen_event event) char pwwn_ptr[BFA_STRING_32]; struct bfa_ioc_attr_s ioc_attr; + memset(&aen_data, 0, sizeof(aen_data)); wwn2str(pwwn_ptr, pwwn); switch (event) { case BFA_PORT_AEN_ONLINE: diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 0019ff7359db..2f09d17730cc 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -1731,6 +1731,21 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, ad_attr->cna_capable = ioc->cna; } +enum bfa_ioc_type_e +bfa_ioc_get_type(struct bfa_ioc_s *ioc) +{ + if (!ioc->ctdev || ioc->fcmode) + return BFA_IOC_TYPE_FC; + else if (ioc->ioc_mc == BFI_MC_IOCFC) + return BFA_IOC_TYPE_FCoE; + else if (ioc->ioc_mc == BFI_MC_LL) + return BFA_IOC_TYPE_LL; + else { + bfa_assert(ioc->ioc_mc == BFI_MC_LL); + return BFA_IOC_TYPE_LL; + } +} + void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) { @@ -1739,12 +1754,7 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) ioc_attr->state = bfa_sm_to_state(ioc_sm_table, ioc->fsm); ioc_attr->port_id = ioc->port_id; - if (!ioc->ctdev || ioc->fcmode) - ioc_attr->ioc_type = BFA_IOC_TYPE_FC; - else if (ioc->ioc_mc == BFI_MC_IOCFC) - ioc_attr->ioc_type = BFA_IOC_TYPE_FCoE; - else if (ioc->ioc_mc == BFI_MC_LL) - ioc_attr->ioc_type = BFA_IOC_TYPE_LL; + ioc_attr->ioc_type = bfa_ioc_get_type(ioc); bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); @@ -1860,7 +1870,7 @@ bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event) union bfa_aen_data_u aen_data; struct bfa_log_mod_s *logmod = ioc->logm; s32 inst_num = 0; - struct bfa_ioc_attr_s ioc_attr; + enum bfa_ioc_type_e ioc_type; switch (event) { case BFA_IOC_AEN_HBGOOD: @@ -1884,8 +1894,8 @@ bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event) memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn)); memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac)); - bfa_ioc_get_attr(ioc, &ioc_attr); - switch (ioc_attr.ioc_type) { + ioc_type = bfa_ioc_get_type(ioc); + switch (ioc_type) { case BFA_IOC_TYPE_FC: aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc); break; @@ -1897,10 +1907,10 @@ bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum 
bfa_ioc_aen_event event) aen_data.ioc.mac = bfa_ioc_get_mac(ioc); break; default: - bfa_assert(ioc_attr.ioc_type == BFA_IOC_TYPE_FC); + bfa_assert(ioc_type == BFA_IOC_TYPE_FC); break; } - aen_data.ioc.ioc_type = ioc_attr.ioc_type; + aen_data.ioc.ioc_type = ioc_type; } /** diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 4ccaeaecd14c..79956c152af9 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -677,7 +677,6 @@ bfad_drv_init(struct bfad_s *bfad) bfa_status_t rc; unsigned long flags; struct bfa_fcs_driver_info_s driver_info; - int i; bfad->cfg_data.rport_del_timeout = rport_del_timeout; bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth; @@ -697,12 +696,7 @@ bfad_drv_init(struct bfad_s *bfad) bfa_init_log(&bfad->bfa, bfad->logmod); bfa_init_trc(&bfad->bfa, bfad->trcmod); bfa_init_aen(&bfad->bfa, bfad->aen); - INIT_LIST_HEAD(&bfad->file_q); - INIT_LIST_HEAD(&bfad->file_free_q); - for (i = 0; i < BFAD_AEN_MAX_APPS; i++) { - bfa_q_qe_init(&bfad->file_buf[i].qe); - list_add_tail(&bfad->file_buf[i].qe, &bfad->file_free_q); - } + memset(bfad->file_map, 0, sizeof(bfad->file_map)); bfa_init_plog(&bfad->bfa, &bfad->plog_buf); bfa_plog_init(&bfad->plog_buf); bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, @@ -799,7 +793,6 @@ bfad_drv_uninit(struct bfad_s *bfad) bfa_isr_disable(&bfad->bfa); bfa_detach(&bfad->bfa); bfad_remove_intr(bfad); - bfa_assert(list_empty(&bfad->file_q)); bfad_hal_mem_release(bfad); bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE; diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index 9fa801a50250..94f4d84c71c9 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -139,14 +139,6 @@ struct bfad_cfg_param_s { u32 binding_method; }; -#define BFAD_AEN_MAX_APPS 8 -struct bfad_aen_file_s { - struct list_head qe; - struct bfad_s *bfad; - s32 ri; - s32 app_id; -}; - /* * BFAD (PCI function) data structure */ @@ -186,9 +178,7 @@ struct bfad_s { struct bfa_log_mod_s *logmod; struct bfa_aen_s *aen; struct bfa_aen_s aen_buf; - struct bfad_aen_file_s file_buf[BFAD_AEN_MAX_APPS]; - struct list_head file_q; - struct list_head file_free_q; + void *file_map[BFA_AEN_MAX_APP]; struct bfa_plog_s plog_buf; int ref_count; bfa_boolean_t ipfc_enabled; diff --git a/drivers/scsi/bfa/include/aen/bfa_aen.h b/drivers/scsi/bfa/include/aen/bfa_aen.h index d9cbc2a783d4..6abbab005db6 100644 --- a/drivers/scsi/bfa/include/aen/bfa_aen.h +++ b/drivers/scsi/bfa/include/aen/bfa_aen.h @@ -18,21 +18,24 @@ #define __BFA_AEN_H__ #include "defs/bfa_defs_aen.h" +#include "defs/bfa_defs_status.h" +#include "cs/bfa_debug.h" -#define BFA_AEN_MAX_ENTRY 512 +#define BFA_AEN_MAX_ENTRY 512 -extern s32 bfa_aen_max_cfg_entry; +extern int bfa_aen_max_cfg_entry; struct bfa_aen_s { void *bfad; - s32 max_entry; - s32 write_index; - s32 read_index; - u32 bfad_num; - u32 seq_num; + int max_entry; + int write_index; + int read_index; + int bfad_num; + int seq_num; void (*aen_cb_notify)(void *bfad); void (*gettimeofday)(struct bfa_timeval_s *tv); - struct bfa_trc_mod_s *trcmod; - struct bfa_aen_entry_s list[BFA_AEN_MAX_ENTRY]; /* Must be the last */ + struct bfa_trc_mod_s *trcmod; + int app_ri[BFA_AEN_MAX_APP]; /* For multiclient support */ + struct bfa_aen_entry_s list[BFA_AEN_MAX_ENTRY]; /* Must be the last */ }; @@ -45,48 +48,49 @@ bfa_aen_set_max_cfg_entry(int max_entry) bfa_aen_max_cfg_entry = max_entry; } -static inline s32 +static inline int bfa_aen_get_max_cfg_entry(void) { return bfa_aen_max_cfg_entry; } -static inline s32 
+static inline int bfa_aen_get_meminfo(void) { return sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry(); } -static inline s32 +static inline int bfa_aen_get_wi(struct bfa_aen_s *aen) { return aen->write_index; } -static inline s32 +static inline int bfa_aen_get_ri(struct bfa_aen_s *aen) { return aen->read_index; } -static inline s32 -bfa_aen_fetch_count(struct bfa_aen_s *aen, s32 read_index) +static inline int +bfa_aen_fetch_count(struct bfa_aen_s *aen, enum bfa_aen_app app_id) { - return ((aen->write_index + aen->max_entry) - read_index) + bfa_assert((app_id < BFA_AEN_MAX_APP) && (app_id >= bfa_aen_app_bcu)); + return ((aen->write_index + aen->max_entry) - aen->app_ri[app_id]) % aen->max_entry; } -s32 bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod, - void *bfad, u32 inst_id, void (*aen_cb_notify)(void *), +int bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod, + void *bfad, int bfad_num, void (*aen_cb_notify)(void *), void (*gettimeofday)(struct bfa_timeval_s *)); -s32 bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category, +void bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category, int aen_type, union bfa_aen_data_u *aen_data); -s32 bfa_aen_fetch(struct bfa_aen_s *aen, struct bfa_aen_entry_s *aen_entry, - s32 entry_space, s32 rii, s32 *ri_arr, - s32 ri_arr_cnt); +bfa_status_t bfa_aen_fetch(struct bfa_aen_s *aen, + struct bfa_aen_entry_s *aen_entry, + int entry_req, enum bfa_aen_app app_id, int *entry_ret); -s32 bfa_aen_get_inst(struct bfa_aen_s *aen); +int bfa_aen_get_inst(struct bfa_aen_s *aen); #endif /* __BFA_AEN_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h b/drivers/scsi/bfa/include/defs/bfa_defs_aen.h index 4c81a613db3d..35244698fcdc 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_aen.h @@ -30,6 +30,16 @@ #include #include +#define BFA_AEN_MAX_APP 5 + +enum bfa_aen_app { + bfa_aen_app_bcu = 0, /* No thread for bcu */ + bfa_aen_app_hcm = 1, + bfa_aen_app_cim = 2, + bfa_aen_app_snia = 3, + bfa_aen_app_test = 4, /* To be removed after unit test */ +}; + enum bfa_aen_category { BFA_AEN_CAT_ADAPTER = 1, BFA_AEN_CAT_PORT = 2, diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h index 6c721b13aca4..8d8e6a966537 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h @@ -144,8 +144,8 @@ enum bfa_ioc_aen_event { * BFA IOC level event data, now just a place holder */ struct bfa_ioc_aen_data_s { - enum bfa_ioc_type_e ioc_type; wwn_t pwwn; + s16 ioc_type; mac_t mac; }; diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h b/drivers/scsi/bfa/include/defs/bfa_defs_lport.h index 7359f82aacfc..0952a139c47c 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_lport.h @@ -59,8 +59,8 @@ enum bfa_lport_aen_event { */ struct bfa_lport_aen_data_s { u16 vf_id; /* vf_id of this logical port */ - u16 rsvd; - enum bfa_port_role roles; /* Logical port mode,IM/TM/IP etc */ + s16 roles; /* Logical port mode,IM/TM/IP etc */ + u32 rsvd; wwn_t ppwwn; /* WWN of its physical port */ wwn_t lpwwn; /* WWN of this logical port */ }; diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_port.h b/drivers/scsi/bfa/include/defs/bfa_defs_port.h index 1c74a8b94aad..501bc9739d9d 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_port.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_port.h @@ -234,14 +234,15 @@ enum 
bfa_port_aen_sfp_pom { }; struct bfa_port_aen_data_s { - enum bfa_ioc_type_e ioc_type; - wwn_t pwwn; /* WWN of the physical port */ - wwn_t fwwn; /* WWN of the fabric port */ - mac_t mac; /* MAC address of the ethernet port, - * applicable to CNA port only */ - int phy_port_num; /*! For SFP related events */ - enum bfa_port_aen_sfp_pom level; /* Only transitions will - * be informed */ + wwn_t pwwn; /* WWN of the physical port */ + wwn_t fwwn; /* WWN of the fabric port */ + s32 phy_port_num; /*! For SFP related events */ + s16 ioc_type; + s16 level; /* Only transitions will + * be informed */ + struct mac_s mac; /* MAC address of the ethernet port, + * applicable to CNA port only */ + s16 rsvd; }; #endif /* __BFA_DEFS_PORT_H__ */ -- cgit v1.2.3 From 816e49b8ed209e5e08d4c43359635cbca17e7196 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:36:56 -0800 Subject: [SCSI] bfa: IOC recovery fix in fcmode. ioc_recover failed to work in fcmode. Fixed the code to initialize the ioc_regs.err_set during the notify_hbfail. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_ioc_ct.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c index 0430edd2e011..469da95aedf3 100644 --- a/drivers/scsi/bfa/bfa_ioc_ct.c +++ b/drivers/scsi/bfa/bfa_ioc_ct.c @@ -171,10 +171,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc) static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc) { - - bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P); - /* Wait for halt to take effect */ - bfa_reg_read(ioc->ioc_regs.ll_halt); + if (ioc->cna) { + bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P); + /* Wait for halt to take effect */ + bfa_reg_read(ioc->ioc_regs.ll_halt); + } else { + bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET); + bfa_reg_read(ioc->ioc_regs.err_set); + } } /** @@ -254,6 +258,11 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) */ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; + + /* + * err set reg : for notification of hb failure in fcmode + */ + ioc->ioc_regs.err_set = (rb + ERR_SET_REG); } /** -- cgit v1.2.3 From f5713c5dfb4d61cd77debf61d3873eb36877ff1f Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:37:09 -0800 Subject: [SCSI] bfa: Fix Command Queue (CPE) full condition check and ack CPE interrupt. Fixed the issue of not acknowledging the command queue full-to-non-full interrupt. Implemented separate acknowledging functions for different ASIC and interrupt mode. Fixed the case of missing CPE interrupt by always processing the pending requests in the completion path. 
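A minimal sketch of the resume pattern described above (illustrative only; the struct and helper names here are invented, and the real implementation is bfa_reqq_resume() together with the hw_reqq_ack hooks in the diff below):

	/*
	 * Illustrative sketch, not the driver code: park a waiter when the
	 * request queue is full, and drain the wait list from the
	 * interrupt/completion path once space frees up again.
	 */
	struct reqq_waiter {
		struct reqq_waiter *next;
		void (*qresume)(void *cbarg);	/* retries the deferred request */
		void *cbarg;
	};

	static void reqq_drain_waiters(struct reqq_waiter **head,
				       int (*reqq_has_room)(void *q), void *q)
	{
		while (*head && reqq_has_room(q)) {
			struct reqq_waiter *w = *head;

			*head = w->next;	/* dequeue */
			w->qresume(w->cbarg);	/* waiter re-posts its request */
		}
	}

The key point is that the drain runs from both the request-queue interrupt and the response-queue completion path, so a missed CPE interrupt no longer strands queued requests.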
Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_hw_cb.c | 13 ++++++++ drivers/scsi/bfa/bfa_hw_ct.c | 9 ++++++ drivers/scsi/bfa/bfa_intr.c | 71 +++++++++++++++++++++++++++++--------------- drivers/scsi/bfa/bfa_iocfc.c | 2 ++ drivers/scsi/bfa/bfa_iocfc.h | 3 ++ 5 files changed, 74 insertions(+), 24 deletions(-) diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c index ede1438619e2..871a4e28575c 100644 --- a/drivers/scsi/bfa/bfa_hw_cb.c +++ b/drivers/scsi/bfa/bfa_hw_cb.c @@ -52,6 +52,18 @@ bfa_hwcb_reginit(struct bfa_s *bfa) } } +void +bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq) +{ +} + +static void +bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq) +{ + bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, + __HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq)); +} + void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq) { @@ -136,6 +148,7 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa) void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix) { + bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix; bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix; } diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c index 51ae5740e6e9..76ceb9a4bf2f 100644 --- a/drivers/scsi/bfa/bfa_hw_ct.c +++ b/drivers/scsi/bfa/bfa_hw_ct.c @@ -84,6 +84,15 @@ bfa_hwct_reginit(struct bfa_s *bfa) } } +void +bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq) +{ + u32 r32; + + r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); + bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32); +} + void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq) { diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c index c42254613f73..0eba3f930d5b 100644 --- a/drivers/scsi/bfa/bfa_intr.c +++ b/drivers/scsi/bfa/bfa_intr.c @@ -34,6 +34,26 @@ bfa_msix_lpu(struct bfa_s *bfa) bfa_ioc_mbox_isr(&bfa->ioc); } +static void +bfa_reqq_resume(struct bfa_s *bfa, int qid) +{ + struct list_head *waitq, *qe, *qen; + struct bfa_reqq_wait_s *wqe; + + waitq = bfa_reqq(bfa, qid); + list_for_each_safe(qe, qen, waitq) { + /** + * Callback only as long as there is room in request queue + */ + if (bfa_reqq_full(bfa, qid)) + break; + + list_del(qe); + wqe = (struct bfa_reqq_wait_s *) qe; + wqe->qresume(wqe->cbarg); + } +} + void bfa_msix_all(struct bfa_s *bfa, int vec) { @@ -128,23 +148,18 @@ bfa_isr_disable(struct bfa_s *bfa) void bfa_msix_reqq(struct bfa_s *bfa, int qid) { - struct list_head *waitq, *qe, *qen; - struct bfa_reqq_wait_s *wqe; + struct list_head *waitq; qid &= (BFI_IOC_MAX_CQS - 1); - waitq = bfa_reqq(bfa, qid); - list_for_each_safe(qe, qen, waitq) { - /** - * Callback only as long as there is room in request queue - */ - if (bfa_reqq_full(bfa, qid)) - break; + bfa->iocfc.hwif.hw_reqq_ack(bfa, qid); - list_del(qe); - wqe = (struct bfa_reqq_wait_s *) qe; - wqe->qresume(wqe->cbarg); - } + /** + * Resume any pending requests in the corresponding reqq. 
+ */ + waitq = bfa_reqq(bfa, qid); + if (!list_empty(waitq)) + bfa_reqq_resume(bfa, qid); } void @@ -158,26 +173,27 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m) } void -bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid) +bfa_msix_rspq(struct bfa_s *bfa, int qid) { - struct bfi_msg_s *m; - u32 pi, ci; + struct bfi_msg_s *m; + u32 pi, ci; + struct list_head *waitq; - bfa_trc_fp(bfa, rsp_qid); + bfa_trc_fp(bfa, qid); - rsp_qid &= (BFI_IOC_MAX_CQS - 1); + qid &= (BFI_IOC_MAX_CQS - 1); - bfa->iocfc.hwif.hw_rspq_ack(bfa, rsp_qid); + bfa->iocfc.hwif.hw_rspq_ack(bfa, qid); - ci = bfa_rspq_ci(bfa, rsp_qid); - pi = bfa_rspq_pi(bfa, rsp_qid); + ci = bfa_rspq_ci(bfa, qid); + pi = bfa_rspq_pi(bfa, qid); bfa_trc_fp(bfa, ci); bfa_trc_fp(bfa, pi); if (bfa->rme_process) { while (ci != pi) { - m = bfa_rspq_elem(bfa, rsp_qid, ci); + m = bfa_rspq_elem(bfa, qid, ci); bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX); bfa_isrs[m->mhdr.msg_class] (bfa, m); @@ -189,9 +205,16 @@ bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid) /** * update CI */ - bfa_rspq_ci(bfa, rsp_qid) = pi; - bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[rsp_qid], pi); + bfa_rspq_ci(bfa, qid) = pi; + bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi); bfa_os_mmiowb(); + + /** + * Resume any pending requests in the corresponding reqq. + */ + waitq = bfa_reqq(bfa, qid); + if (!list_empty(waitq)) + bfa_reqq_resume(bfa, qid); } void diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c index 137591c984d7..a5551db6dba6 100644 --- a/drivers/scsi/bfa/bfa_iocfc.c +++ b/drivers/scsi/bfa/bfa_iocfc.c @@ -172,6 +172,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, */ if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) { iocfc->hwif.hw_reginit = bfa_hwct_reginit; + iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack; iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack; iocfc->hwif.hw_msix_init = bfa_hwct_msix_init; iocfc->hwif.hw_msix_install = bfa_hwct_msix_install; @@ -180,6 +181,7 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs; } else { iocfc->hwif.hw_reginit = bfa_hwcb_reginit; + iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack; iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack; iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init; iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install; diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h index ce9a830a4207..fbb4bdc9d600 100644 --- a/drivers/scsi/bfa/bfa_iocfc.h +++ b/drivers/scsi/bfa/bfa_iocfc.h @@ -54,6 +54,7 @@ struct bfa_msix_s { */ struct bfa_hwif_s { void (*hw_reginit)(struct bfa_s *bfa); + void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq); void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq); void (*hw_msix_init)(struct bfa_s *bfa, int nvecs); void (*hw_msix_install)(struct bfa_s *bfa); @@ -143,6 +144,7 @@ void bfa_msix_rspq(struct bfa_s *bfa, int vec); void bfa_msix_lpu_err(struct bfa_s *bfa, int vec); void bfa_hwcb_reginit(struct bfa_s *bfa); +void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq); void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq); void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs); void bfa_hwcb_msix_install(struct bfa_s *bfa); @@ -151,6 +153,7 @@ void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix); void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs, u32 *maxvec); void bfa_hwct_reginit(struct bfa_s *bfa); +void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq); void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq); void 
bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs); void bfa_hwct_msix_install(struct bfa_s *bfa); -- cgit v1.2.3 From 78f915f7b095dda76970c8c9568489fa779ef73f Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:37:18 -0800 Subject: [SCSI] bfa: In MSIX mode, ignore spurious RME interrupts when FCoE ports are in FW mismatch state. Use dummy interrupt handlers till chip initialization is complete. Install real interrupt handlers after chip initialization. Also removed msix installation code in bfa_iocfc_init(). Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_ioc_ct.c | 43 +++++++++++++++++-------------------------- drivers/scsi/bfa/bfa_iocfc.c | 1 - 2 files changed, 17 insertions(+), 27 deletions(-) diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c index 469da95aedf3..2431922c34a4 100644 --- a/drivers/scsi/bfa/bfa_ioc_ct.c +++ b/drivers/scsi/bfa/bfa_ioc_ct.c @@ -331,12 +331,12 @@ bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc) */ bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); - pll_sclk = __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN | - __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(0U) | + pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST | + __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) | __APP_PLL_312_JITLMT0_1(3U) | __APP_PLL_312_CNTLMT0_1(1U); - pll_fclk = __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN | - __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(0U) | + pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST | + __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) | __APP_PLL_425_JITLMT0_1(3U) | __APP_PLL_425_CNTLMT0_1(1U); @@ -366,36 +366,27 @@ bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc) bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, - __APP_PLL_312_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, - __APP_PLL_312_BYPASS | - __APP_PLL_312_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, - __APP_PLL_425_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, - __APP_PLL_425_BYPASS | - __APP_PLL_425_LOGIC_SOFT_RESET); - bfa_os_udelay(2); - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, - __APP_PLL_312_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, - __APP_PLL_425_LOGIC_SOFT_RESET); - - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, - pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, - pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET); + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk | + __APP_PLL_312_LOGIC_SOFT_RESET); + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | + __APP_PLL_425_LOGIC_SOFT_RESET); + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk | + __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE); + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | + __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE); /** * Wait for PLLs to lock. 
*/ + bfa_reg_read(rb + HOSTFN0_INT_MSK); bfa_os_udelay(2000); bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); - bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk); - bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk); + bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk | + __APP_PLL_312_ENABLE); + bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk | + __APP_PLL_425_ENABLE); bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START); bfa_os_udelay(1000); diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c index a5551db6dba6..6677f83f2c99 100644 --- a/drivers/scsi/bfa/bfa_iocfc.c +++ b/drivers/scsi/bfa/bfa_iocfc.c @@ -659,7 +659,6 @@ bfa_iocfc_init(struct bfa_s *bfa) { bfa->iocfc.action = BFA_IOCFC_ACT_INIT; bfa_ioc_enable(&bfa->ioc); - bfa_msix_install(bfa); } /** -- cgit v1.2.3 From 13cc20c5e764e6ef8d57f33980ab8c386c25fb4d Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:37:29 -0800 Subject: [SCSI] bfa: IOC fixes, check for IOC down condition. Currently BFA was not checking for IOC down condition when issuing getstats/clearstats Add check to see if IOC is operational, before issuing getstats/clearstats. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_ioc.c | 11 ++++++++--- drivers/scsi/bfa/bfa_iocfc.c | 10 ++++++++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 2f09d17730cc..4d9a47ccffe9 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -1129,9 +1129,6 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ) boot_type = BFI_BOOT_TYPE_FLASH; fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno); - fwimg[BFI_BOOT_TYPE_OFF / sizeof(u32)] = bfa_os_swap32(boot_type); - fwimg[BFI_BOOT_PARAM_OFF / sizeof(u32)] = - bfa_os_swap32(boot_param); pgnum = bfa_ioc_smem_pgnum(ioc, loff); pgoff = bfa_ioc_smem_pgoff(ioc, loff); @@ -1166,6 +1163,14 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, bfa_reg_write(ioc->ioc_regs.host_page_num_fn, bfa_ioc_smem_pgnum(ioc, 0)); + + /* + * Set boot type and boot param at the end. + */ + bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF, + bfa_os_swap32(boot_type)); + bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF, + bfa_os_swap32(boot_param)); } static void diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c index 6677f83f2c99..a76de2669bfc 100644 --- a/drivers/scsi/bfa/bfa_iocfc.c +++ b/drivers/scsi/bfa/bfa_iocfc.c @@ -801,6 +801,11 @@ bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats, return BFA_STATUS_DEVBUSY; } + if (!bfa_iocfc_is_operational(bfa)) { + bfa_trc(bfa, 0); + return BFA_STATUS_IOC_NON_OP; + } + iocfc->stats_busy = BFA_TRUE; iocfc->stats_ret = stats; iocfc->stats_cbfn = cbfn; @@ -821,6 +826,11 @@ bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg) return BFA_STATUS_DEVBUSY; } + if (!bfa_iocfc_is_operational(bfa)) { + bfa_trc(bfa, 0); + return BFA_STATUS_IOC_NON_OP; + } + iocfc->stats_busy = BFA_TRUE; iocfc->stats_cbfn = cbfn; iocfc->stats_cbarg = cbarg; -- cgit v1.2.3 From 1c8a4c37494932acd59079b4fc8d8f69fb329c2a Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:37:37 -0800 Subject: [SCSI] bfa: Rename pport to fcport in BFA FCS. 
Rename pport structures to fcport in BFA FCS, to resolve confusion about the port structures in the firmware, and make sure the SG page is setup correctly. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_fcport.c | 1570 +++++++++++++--------- drivers/scsi/bfa/bfa_fcs_port.c | 2 +- drivers/scsi/bfa/bfa_module.c | 4 +- drivers/scsi/bfa/bfa_modules_priv.h | 2 +- drivers/scsi/bfa/bfa_port_priv.h | 19 +- drivers/scsi/bfa/bfa_priv.h | 2 +- drivers/scsi/bfa/bfa_trcmod_priv.h | 2 +- drivers/scsi/bfa/bfad.c | 2 +- drivers/scsi/bfa/bfad_attr.c | 6 +- drivers/scsi/bfa/bfad_im.c | 2 +- drivers/scsi/bfa/fabric.c | 20 +- drivers/scsi/bfa/fdmi.c | 2 +- drivers/scsi/bfa/include/bfa_svc.h | 76 +- drivers/scsi/bfa/include/bfi/bfi_pport.h | 5 + drivers/scsi/bfa/include/cs/bfa_sm.h | 8 + drivers/scsi/bfa/include/defs/bfa_defs_ethport.h | 1 + drivers/scsi/bfa/include/defs/bfa_defs_fcport.h | 94 ++ drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h | 1 + drivers/scsi/bfa/loop.c | 2 +- drivers/scsi/bfa/lport_api.c | 2 +- drivers/scsi/bfa/ms.c | 2 +- drivers/scsi/bfa/ns.c | 2 +- drivers/scsi/bfa/rport.c | 8 +- drivers/scsi/bfa/rport_api.c | 2 +- drivers/scsi/bfa/vport.c | 2 +- 25 files changed, 1103 insertions(+), 735 deletions(-) create mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_fcport.h diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c index bdea7f0eb6bd..f392a7fa8c7f 100644 --- a/drivers/scsi/bfa/bfa_fcport.c +++ b/drivers/scsi/bfa/bfa_fcport.c @@ -23,34 +23,40 @@ #include #include -BFA_TRC_FILE(HAL, PPORT); -BFA_MODULE(pport); +BFA_TRC_FILE(HAL, FCPORT); +BFA_MODULE(fcport); /* * The port is considered disabled if corresponding physical port or IOC are * disabled explicitly */ #define BFA_PORT_IS_DISABLED(bfa) \ - ((bfa_pport_is_disabled(bfa) == BFA_TRUE) || \ + ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \ (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) /* * forward declarations */ -static bfa_boolean_t bfa_pport_send_enable(struct bfa_pport_s *port); -static bfa_boolean_t bfa_pport_send_disable(struct bfa_pport_s *port); -static void bfa_pport_update_linkinfo(struct bfa_pport_s *pport); -static void bfa_pport_reset_linkinfo(struct bfa_pport_s *pport); -static void bfa_pport_set_wwns(struct bfa_pport_s *port); -static void __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete); +static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport); +static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport); +static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport); +static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport); +static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport); +static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete); +static void bfa_fcport_callback(struct bfa_fcport_s *fcport, + enum bfa_pport_linkstate event); +static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, + enum bfa_pport_linkstate event); +static void __bfa_cb_fcport_stats(void *cbarg, bfa_boolean_t complete); +static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete); +static void bfa_fcport_stats_timeout(void *cbarg); +static void bfa_fcport_stats_clr_timeout(void *cbarg); + static void __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete); static void bfa_port_stats_timeout(void *cbarg); static void bfa_port_stats_clr_timeout(void *cbarg); -static void bfa_pport_callback(struct bfa_pport_s *pport, 
- enum bfa_pport_linkstate event); -static void bfa_pport_queue_cb(struct bfa_pport_ln_s *ln, - enum bfa_pport_linkstate event); + /** * bfa_pport_private */ @@ -58,87 +64,86 @@ static void bfa_pport_queue_cb(struct bfa_pport_ln_s *ln, /** * BFA port state machine events */ -enum bfa_pport_sm_event { - BFA_PPORT_SM_START = 1, /* start port state machine */ - BFA_PPORT_SM_STOP = 2, /* stop port state machine */ - BFA_PPORT_SM_ENABLE = 3, /* enable port */ - BFA_PPORT_SM_DISABLE = 4, /* disable port state machine */ - BFA_PPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */ - BFA_PPORT_SM_LINKUP = 6, /* firmware linkup event */ - BFA_PPORT_SM_LINKDOWN = 7, /* firmware linkup down */ - BFA_PPORT_SM_QRESUME = 8, /* CQ space available */ - BFA_PPORT_SM_HWFAIL = 9, /* IOC h/w failure */ +enum bfa_fcport_sm_event { + BFA_FCPORT_SM_START = 1, /* start port state machine */ + BFA_FCPORT_SM_STOP = 2, /* stop port state machine */ + BFA_FCPORT_SM_ENABLE = 3, /* enable port */ + BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */ + BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */ + BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */ + BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */ + BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */ + BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ }; /** * BFA port link notification state machine events */ -enum bfa_pport_ln_sm_event { - BFA_PPORT_LN_SM_LINKUP = 1, /* linkup event */ - BFA_PPORT_LN_SM_LINKDOWN = 2, /* linkdown event */ - BFA_PPORT_LN_SM_NOTIFICATION = 3 /* done notification */ +enum bfa_fcport_ln_sm_event { + BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */ + BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */ + BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */ }; -static void bfa_pport_sm_uninit(struct bfa_pport_s *pport, - enum bfa_pport_sm_event event); -static void bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport, - enum bfa_pport_sm_event event); -static void bfa_pport_sm_enabling(struct bfa_pport_s *pport, - enum bfa_pport_sm_event event); -static void bfa_pport_sm_linkdown(struct bfa_pport_s *pport, - enum bfa_pport_sm_event event); -static void bfa_pport_sm_linkup(struct bfa_pport_s *pport, - enum bfa_pport_sm_event event); -static void bfa_pport_sm_disabling(struct bfa_pport_s *pport, - enum bfa_pport_sm_event event); -static void bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport, - enum bfa_pport_sm_event event); -static void bfa_pport_sm_disabled(struct bfa_pport_s *pport, - enum bfa_pport_sm_event event); -static void bfa_pport_sm_stopped(struct bfa_pport_s *pport, - enum bfa_pport_sm_event event); -static void bfa_pport_sm_iocdown(struct bfa_pport_s *pport, - enum bfa_pport_sm_event event); -static void bfa_pport_sm_iocfail(struct bfa_pport_s *pport, - enum bfa_pport_sm_event event); - -static void bfa_pport_ln_sm_dn(struct bfa_pport_ln_s *ln, - enum bfa_pport_ln_sm_event event); -static void bfa_pport_ln_sm_dn_nf(struct bfa_pport_ln_s *ln, - enum bfa_pport_ln_sm_event event); -static void bfa_pport_ln_sm_dn_up_nf(struct bfa_pport_ln_s *ln, - enum bfa_pport_ln_sm_event event); -static void bfa_pport_ln_sm_up(struct bfa_pport_ln_s *ln, - enum bfa_pport_ln_sm_event event); -static void bfa_pport_ln_sm_up_nf(struct bfa_pport_ln_s *ln, - enum bfa_pport_ln_sm_event event); -static void bfa_pport_ln_sm_up_dn_nf(struct bfa_pport_ln_s *ln, - enum bfa_pport_ln_sm_event event); -static void bfa_pport_ln_sm_up_dn_up_nf(struct bfa_pport_ln_s *ln, - enum bfa_pport_ln_sm_event event); +static void 
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); +static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event); + +static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); +static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event); static struct bfa_sm_table_s hal_pport_sm_table[] = { - {BFA_SM(bfa_pport_sm_uninit), BFA_PPORT_ST_UNINIT}, - {BFA_SM(bfa_pport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT}, - {BFA_SM(bfa_pport_sm_enabling), BFA_PPORT_ST_ENABLING}, - {BFA_SM(bfa_pport_sm_linkdown), BFA_PPORT_ST_LINKDOWN}, - {BFA_SM(bfa_pport_sm_linkup), BFA_PPORT_ST_LINKUP}, - {BFA_SM(bfa_pport_sm_disabling_qwait), - BFA_PPORT_ST_DISABLING_QWAIT}, - {BFA_SM(bfa_pport_sm_disabling), BFA_PPORT_ST_DISABLING}, - {BFA_SM(bfa_pport_sm_disabled), BFA_PPORT_ST_DISABLED}, - {BFA_SM(bfa_pport_sm_stopped), BFA_PPORT_ST_STOPPED}, - {BFA_SM(bfa_pport_sm_iocdown), BFA_PPORT_ST_IOCDOWN}, - {BFA_SM(bfa_pport_sm_iocfail), BFA_PPORT_ST_IOCDOWN}, + {BFA_SM(bfa_fcport_sm_uninit), BFA_PPORT_ST_UNINIT}, + {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT}, + {BFA_SM(bfa_fcport_sm_enabling), BFA_PPORT_ST_ENABLING}, + {BFA_SM(bfa_fcport_sm_linkdown), BFA_PPORT_ST_LINKDOWN}, + {BFA_SM(bfa_fcport_sm_linkup), BFA_PPORT_ST_LINKUP}, + {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PPORT_ST_DISABLING_QWAIT}, + {BFA_SM(bfa_fcport_sm_disabling), BFA_PPORT_ST_DISABLING}, + {BFA_SM(bfa_fcport_sm_disabled), BFA_PPORT_ST_DISABLED}, + {BFA_SM(bfa_fcport_sm_stopped), BFA_PPORT_ST_STOPPED}, + {BFA_SM(bfa_fcport_sm_iocdown), BFA_PPORT_ST_IOCDOWN}, + {BFA_SM(bfa_fcport_sm_iocfail), BFA_PPORT_ST_IOCDOWN}, }; static void -bfa_pport_aen_post(struct bfa_pport_s *pport, enum bfa_port_aen_event event) +bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event) { union bfa_aen_data_u aen_data; - struct bfa_log_mod_s *logmod = pport->bfa->logm; - wwn_t pwwn = pport->pwwn; + struct bfa_log_mod_s *logmod = fcport->bfa->logm; + wwn_t pwwn = fcport->pwwn; 
char pwwn_ptr[BFA_STRING_32]; struct bfa_ioc_attr_s ioc_attr; @@ -167,28 +172,29 @@ bfa_pport_aen_post(struct bfa_pport_s *pport, enum bfa_port_aen_event event) break; } - bfa_ioc_get_attr(&pport->bfa->ioc, &ioc_attr); + bfa_ioc_get_attr(&fcport->bfa->ioc, &ioc_attr); aen_data.port.ioc_type = ioc_attr.ioc_type; aen_data.port.pwwn = pwwn; } static void -bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) +bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) { - bfa_trc(pport->bfa, event); + bfa_trc(fcport->bfa, event); switch (event) { - case BFA_PPORT_SM_START: + case BFA_FCPORT_SM_START: /** * Start event after IOC is configured and BFA is started. */ - if (bfa_pport_send_enable(pport)) - bfa_sm_set_state(pport, bfa_pport_sm_enabling); + if (bfa_fcport_send_enable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else - bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); break; - case BFA_PPORT_SM_ENABLE: + case BFA_FCPORT_SM_ENABLE: /** * Port is persistently configured to be in enabled state. Do * not change state. Port enabling is done when START event is @@ -196,389 +202,395 @@ bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) */ break; - case BFA_PPORT_SM_DISABLE: + case BFA_FCPORT_SM_DISABLE: /** * If a port is persistently configured to be disabled, the * first event will a port disable request. */ - bfa_sm_set_state(pport, bfa_pport_sm_disabled); + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); break; - case BFA_PPORT_SM_HWFAIL: - bfa_sm_set_state(pport, bfa_pport_sm_iocdown); + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; default: - bfa_sm_fault(pport->bfa, event); + bfa_sm_fault(fcport->bfa, event); } } static void -bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport, - enum bfa_pport_sm_event event) +bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) { - bfa_trc(pport->bfa, event); + bfa_trc(fcport->bfa, event); switch (event) { - case BFA_PPORT_SM_QRESUME: - bfa_sm_set_state(pport, bfa_pport_sm_enabling); - bfa_pport_send_enable(pport); + case BFA_FCPORT_SM_QRESUME: + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); + bfa_fcport_send_enable(fcport); break; - case BFA_PPORT_SM_STOP: - bfa_reqq_wcancel(&pport->reqq_wait); - bfa_sm_set_state(pport, bfa_pport_sm_stopped); + case BFA_FCPORT_SM_STOP: + bfa_reqq_wcancel(&fcport->reqq_wait); + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; - case BFA_PPORT_SM_ENABLE: + case BFA_FCPORT_SM_ENABLE: /** * Already enable is in progress. */ break; - case BFA_PPORT_SM_DISABLE: + case BFA_FCPORT_SM_DISABLE: /** * Just send disable request to firmware when room becomes * available in request queue. */ - bfa_sm_set_state(pport, bfa_pport_sm_disabled); - bfa_reqq_wcancel(&pport->reqq_wait); - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); + bfa_reqq_wcancel(&fcport->reqq_wait); + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); - bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; - case BFA_PPORT_SM_LINKUP: - case BFA_PPORT_SM_LINKDOWN: + case BFA_FCPORT_SM_LINKUP: + case BFA_FCPORT_SM_LINKDOWN: /** * Possible to get link events when doing back-to-back * enable/disables. 
*/ break; - case BFA_PPORT_SM_HWFAIL: - bfa_reqq_wcancel(&pport->reqq_wait); - bfa_sm_set_state(pport, bfa_pport_sm_iocdown); + case BFA_FCPORT_SM_HWFAIL: + bfa_reqq_wcancel(&fcport->reqq_wait); + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; default: - bfa_sm_fault(pport->bfa, event); + bfa_sm_fault(fcport->bfa, event); } } static void -bfa_pport_sm_enabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) +bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) { - bfa_trc(pport->bfa, event); + bfa_trc(fcport->bfa, event); switch (event) { - case BFA_PPORT_SM_FWRSP: - case BFA_PPORT_SM_LINKDOWN: - bfa_sm_set_state(pport, bfa_pport_sm_linkdown); + case BFA_FCPORT_SM_FWRSP: + case BFA_FCPORT_SM_LINKDOWN: + bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); break; - case BFA_PPORT_SM_LINKUP: - bfa_pport_update_linkinfo(pport); - bfa_sm_set_state(pport, bfa_pport_sm_linkup); + case BFA_FCPORT_SM_LINKUP: + bfa_fcport_update_linkinfo(fcport); + bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); - bfa_assert(pport->event_cbfn); - bfa_pport_callback(pport, BFA_PPORT_LINKUP); + bfa_assert(fcport->event_cbfn); + bfa_fcport_callback(fcport, BFA_PPORT_LINKUP); break; - case BFA_PPORT_SM_ENABLE: + case BFA_FCPORT_SM_ENABLE: /** * Already being enabled. */ break; - case BFA_PPORT_SM_DISABLE: - if (bfa_pport_send_disable(pport)) - bfa_sm_set_state(pport, bfa_pport_sm_disabling); + case BFA_FCPORT_SM_DISABLE: + if (bfa_fcport_send_disable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); else - bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait); + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); - bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; - case BFA_PPORT_SM_STOP: - bfa_sm_set_state(pport, bfa_pport_sm_stopped); + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; - case BFA_PPORT_SM_HWFAIL: - bfa_sm_set_state(pport, bfa_pport_sm_iocdown); + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; default: - bfa_sm_fault(pport->bfa, event); + bfa_sm_fault(fcport->bfa, event); } } static void -bfa_pport_sm_linkdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) +bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) { - bfa_trc(pport->bfa, event); + bfa_trc(fcport->bfa, event); switch (event) { - case BFA_PPORT_SM_LINKUP: - bfa_pport_update_linkinfo(pport); - bfa_sm_set_state(pport, bfa_pport_sm_linkup); - bfa_assert(pport->event_cbfn); - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, + case BFA_FCPORT_SM_LINKUP: + bfa_fcport_update_linkinfo(fcport); + bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); + bfa_assert(fcport->event_cbfn); + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup"); - bfa_pport_callback(pport, BFA_PPORT_LINKUP); - bfa_pport_aen_post(pport, BFA_PORT_AEN_ONLINE); + bfa_fcport_callback(fcport, BFA_PPORT_LINKUP); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE); /** * If QoS is enabled and it is not online, * Send a separate event. 
*/ - if ((pport->cfg.qos_enabled) - && (bfa_os_ntohl(pport->qos_attr.state) != BFA_QOS_ONLINE)) - bfa_pport_aen_post(pport, BFA_PORT_AEN_QOS_NEG); + if ((fcport->cfg.qos_enabled) + && (bfa_os_ntohl(fcport->qos_attr.state) != BFA_QOS_ONLINE)) + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG); break; - case BFA_PPORT_SM_LINKDOWN: + case BFA_FCPORT_SM_LINKDOWN: /** * Possible to get link down event. */ break; - case BFA_PPORT_SM_ENABLE: + case BFA_FCPORT_SM_ENABLE: /** * Already enabled. */ break; - case BFA_PPORT_SM_DISABLE: - if (bfa_pport_send_disable(pport)) - bfa_sm_set_state(pport, bfa_pport_sm_disabling); + case BFA_FCPORT_SM_DISABLE: + if (bfa_fcport_send_disable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); else - bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait); + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); - bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; - case BFA_PPORT_SM_STOP: - bfa_sm_set_state(pport, bfa_pport_sm_stopped); + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; - case BFA_PPORT_SM_HWFAIL: - bfa_sm_set_state(pport, bfa_pport_sm_iocdown); + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; default: - bfa_sm_fault(pport->bfa, event); + bfa_sm_fault(fcport->bfa, event); } } static void -bfa_pport_sm_linkup(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) +bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) { - bfa_trc(pport->bfa, event); + bfa_trc(fcport->bfa, event); switch (event) { - case BFA_PPORT_SM_ENABLE: + case BFA_FCPORT_SM_ENABLE: /** * Already enabled. 
*/ break; - case BFA_PPORT_SM_DISABLE: - if (bfa_pport_send_disable(pport)) - bfa_sm_set_state(pport, bfa_pport_sm_disabling); + case BFA_FCPORT_SM_DISABLE: + if (bfa_fcport_send_disable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); else - bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait); + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait); - bfa_pport_reset_linkinfo(pport); - bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN); + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_DISABLE, 0, "Port Disable"); - bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); - bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE); break; - case BFA_PPORT_SM_LINKDOWN: - bfa_sm_set_state(pport, bfa_pport_sm_linkdown); - bfa_pport_reset_linkinfo(pport); - bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, + case BFA_FCPORT_SM_LINKDOWN: + bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown); + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN); + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown"); - if (BFA_PORT_IS_DISABLED(pport->bfa)) - bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); + if (BFA_PORT_IS_DISABLED(fcport->bfa)) + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); else - bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); break; - case BFA_PPORT_SM_STOP: - bfa_sm_set_state(pport, bfa_pport_sm_stopped); - bfa_pport_reset_linkinfo(pport); - if (BFA_PORT_IS_DISABLED(pport->bfa)) - bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + bfa_fcport_reset_linkinfo(fcport); + if (BFA_PORT_IS_DISABLED(fcport->bfa)) + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); else - bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); break; - case BFA_PPORT_SM_HWFAIL: - bfa_sm_set_state(pport, bfa_pport_sm_iocdown); - bfa_pport_reset_linkinfo(pport); - bfa_pport_callback(pport, BFA_PPORT_LINKDOWN); - if (BFA_PORT_IS_DISABLED(pport->bfa)) - bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE); + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); + bfa_fcport_reset_linkinfo(fcport); + bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN); + if (BFA_PORT_IS_DISABLED(fcport->bfa)) + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE); else - bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT); break; default: - bfa_sm_fault(pport->bfa, event); + bfa_sm_fault(fcport->bfa, event); } } static void -bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport, - enum bfa_pport_sm_event event) +bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) { - bfa_trc(pport->bfa, event); + bfa_trc(fcport->bfa, event); switch (event) { - case BFA_PPORT_SM_QRESUME: - bfa_sm_set_state(pport, bfa_pport_sm_disabling); - bfa_pport_send_disable(pport); + case BFA_FCPORT_SM_QRESUME: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabling); + bfa_fcport_send_disable(fcport); break; - case BFA_PPORT_SM_STOP: - bfa_sm_set_state(pport, bfa_pport_sm_stopped); - 
bfa_reqq_wcancel(&pport->reqq_wait); + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); + bfa_reqq_wcancel(&fcport->reqq_wait); break; - case BFA_PPORT_SM_DISABLE: + case BFA_FCPORT_SM_DISABLE: /** * Already being disabled. */ break; - case BFA_PPORT_SM_LINKUP: - case BFA_PPORT_SM_LINKDOWN: + case BFA_FCPORT_SM_LINKUP: + case BFA_FCPORT_SM_LINKDOWN: /** * Possible to get link events when doing back-to-back * enable/disables. */ break; - case BFA_PPORT_SM_HWFAIL: - bfa_sm_set_state(pport, bfa_pport_sm_iocfail); - bfa_reqq_wcancel(&pport->reqq_wait); + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); + bfa_reqq_wcancel(&fcport->reqq_wait); break; default: - bfa_sm_fault(pport->bfa, event); + bfa_sm_fault(fcport->bfa, event); } } static void -bfa_pport_sm_disabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) +bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) { - bfa_trc(pport->bfa, event); + bfa_trc(fcport->bfa, event); switch (event) { - case BFA_PPORT_SM_FWRSP: - bfa_sm_set_state(pport, bfa_pport_sm_disabled); + case BFA_FCPORT_SM_FWRSP: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); break; - case BFA_PPORT_SM_DISABLE: + case BFA_FCPORT_SM_DISABLE: /** * Already being disabled. */ break; - case BFA_PPORT_SM_ENABLE: - if (bfa_pport_send_enable(pport)) - bfa_sm_set_state(pport, bfa_pport_sm_enabling); + case BFA_FCPORT_SM_ENABLE: + if (bfa_fcport_send_enable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else - bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); - bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); break; - case BFA_PPORT_SM_STOP: - bfa_sm_set_state(pport, bfa_pport_sm_stopped); + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; - case BFA_PPORT_SM_LINKUP: - case BFA_PPORT_SM_LINKDOWN: + case BFA_FCPORT_SM_LINKUP: + case BFA_FCPORT_SM_LINKDOWN: /** * Possible to get link events when doing back-to-back * enable/disables. */ break; - case BFA_PPORT_SM_HWFAIL: - bfa_sm_set_state(pport, bfa_pport_sm_iocfail); + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); break; default: - bfa_sm_fault(pport->bfa, event); + bfa_sm_fault(fcport->bfa, event); } } static void -bfa_pport_sm_disabled(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) +bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) { - bfa_trc(pport->bfa, event); + bfa_trc(fcport->bfa, event); switch (event) { - case BFA_PPORT_SM_START: + case BFA_FCPORT_SM_START: /** * Ignore start event for a port that is disabled. 
*/ break; - case BFA_PPORT_SM_STOP: - bfa_sm_set_state(pport, bfa_pport_sm_stopped); + case BFA_FCPORT_SM_STOP: + bfa_sm_set_state(fcport, bfa_fcport_sm_stopped); break; - case BFA_PPORT_SM_ENABLE: - if (bfa_pport_send_enable(pport)) - bfa_sm_set_state(pport, bfa_pport_sm_enabling); + case BFA_FCPORT_SM_ENABLE: + if (bfa_fcport_send_enable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else - bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); - bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL, + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ENABLE, 0, "Port Enable"); - bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE); + bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE); break; - case BFA_PPORT_SM_DISABLE: + case BFA_FCPORT_SM_DISABLE: /** * Already disabled. */ break; - case BFA_PPORT_SM_HWFAIL: - bfa_sm_set_state(pport, bfa_pport_sm_iocfail); + case BFA_FCPORT_SM_HWFAIL: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail); break; default: - bfa_sm_fault(pport->bfa, event); + bfa_sm_fault(fcport->bfa, event); } } static void -bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) +bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) { - bfa_trc(pport->bfa, event); + bfa_trc(fcport->bfa, event); switch (event) { - case BFA_PPORT_SM_START: - if (bfa_pport_send_enable(pport)) - bfa_sm_set_state(pport, bfa_pport_sm_enabling); + case BFA_FCPORT_SM_START: + if (bfa_fcport_send_enable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else - bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); break; default: @@ -593,16 +605,17 @@ bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) * Port is enabled. IOC is down/failed. */ static void -bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) +bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) { - bfa_trc(pport->bfa, event); + bfa_trc(fcport->bfa, event); switch (event) { - case BFA_PPORT_SM_START: - if (bfa_pport_send_enable(pport)) - bfa_sm_set_state(pport, bfa_pport_sm_enabling); + case BFA_FCPORT_SM_START: + if (bfa_fcport_send_enable(fcport)) + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); else - bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait); + bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait); break; default: @@ -617,17 +630,18 @@ bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) * Port is disabled. IOC is down/failed. 
*/ static void -bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) +bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, + enum bfa_fcport_sm_event event) { - bfa_trc(pport->bfa, event); + bfa_trc(fcport->bfa, event); switch (event) { - case BFA_PPORT_SM_START: - bfa_sm_set_state(pport, bfa_pport_sm_disabled); + case BFA_FCPORT_SM_START: + bfa_sm_set_state(fcport, bfa_fcport_sm_disabled); break; - case BFA_PPORT_SM_ENABLE: - bfa_sm_set_state(pport, bfa_pport_sm_iocdown); + case BFA_FCPORT_SM_ENABLE: + bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown); break; default: @@ -642,19 +656,19 @@ bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event) * Link state is down */ static void -bfa_pport_ln_sm_dn(struct bfa_pport_ln_s *ln, - enum bfa_pport_ln_sm_event event) +bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) { - bfa_trc(ln->pport->bfa, event); + bfa_trc(ln->fcport->bfa, event); switch (event) { - case BFA_PPORT_LN_SM_LINKUP: - bfa_sm_set_state(ln, bfa_pport_ln_sm_up_nf); - bfa_pport_queue_cb(ln, BFA_PPORT_LINKUP); + case BFA_FCPORT_LN_SM_LINKUP: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); + bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP); break; default: - bfa_sm_fault(ln->pport->bfa, event); + bfa_sm_fault(ln->fcport->bfa, event); } } @@ -662,22 +676,22 @@ bfa_pport_ln_sm_dn(struct bfa_pport_ln_s *ln, * Link state is waiting for down notification */ static void -bfa_pport_ln_sm_dn_nf(struct bfa_pport_ln_s *ln, - enum bfa_pport_ln_sm_event event) +bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) { - bfa_trc(ln->pport->bfa, event); + bfa_trc(ln->fcport->bfa, event); switch (event) { - case BFA_PPORT_LN_SM_LINKUP: - bfa_sm_set_state(ln, bfa_pport_ln_sm_dn_up_nf); + case BFA_FCPORT_LN_SM_LINKUP: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); break; - case BFA_PPORT_LN_SM_NOTIFICATION: - bfa_sm_set_state(ln, bfa_pport_ln_sm_dn); + case BFA_FCPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); break; default: - bfa_sm_fault(ln->pport->bfa, event); + bfa_sm_fault(ln->fcport->bfa, event); } } @@ -685,23 +699,23 @@ bfa_pport_ln_sm_dn_nf(struct bfa_pport_ln_s *ln, * Link state is waiting for down notification and there is a pending up */ static void -bfa_pport_ln_sm_dn_up_nf(struct bfa_pport_ln_s *ln, - enum bfa_pport_ln_sm_event event) +bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) { - bfa_trc(ln->pport->bfa, event); + bfa_trc(ln->fcport->bfa, event); switch (event) { - case BFA_PPORT_LN_SM_LINKDOWN: - bfa_sm_set_state(ln, bfa_pport_ln_sm_dn_nf); + case BFA_FCPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); break; - case BFA_PPORT_LN_SM_NOTIFICATION: - bfa_sm_set_state(ln, bfa_pport_ln_sm_up_nf); - bfa_pport_queue_cb(ln, BFA_PPORT_LINKUP); + case BFA_FCPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf); + bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP); break; default: - bfa_sm_fault(ln->pport->bfa, event); + bfa_sm_fault(ln->fcport->bfa, event); } } @@ -709,19 +723,19 @@ bfa_pport_ln_sm_dn_up_nf(struct bfa_pport_ln_s *ln, * Link state is up */ static void -bfa_pport_ln_sm_up(struct bfa_pport_ln_s *ln, - enum bfa_pport_ln_sm_event event) +bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) { - bfa_trc(ln->pport->bfa, event); + bfa_trc(ln->fcport->bfa, event); switch (event) { - case BFA_PPORT_LN_SM_LINKDOWN: - bfa_sm_set_state(ln, 
bfa_pport_ln_sm_dn_nf); - bfa_pport_queue_cb(ln, BFA_PPORT_LINKDOWN); + case BFA_FCPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); + bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN); break; default: - bfa_sm_fault(ln->pport->bfa, event); + bfa_sm_fault(ln->fcport->bfa, event); } } @@ -729,22 +743,22 @@ bfa_pport_ln_sm_up(struct bfa_pport_ln_s *ln, * Link state is waiting for up notification */ static void -bfa_pport_ln_sm_up_nf(struct bfa_pport_ln_s *ln, - enum bfa_pport_ln_sm_event event) +bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) { - bfa_trc(ln->pport->bfa, event); + bfa_trc(ln->fcport->bfa, event); switch (event) { - case BFA_PPORT_LN_SM_LINKDOWN: - bfa_sm_set_state(ln, bfa_pport_ln_sm_up_dn_nf); + case BFA_FCPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf); break; - case BFA_PPORT_LN_SM_NOTIFICATION: - bfa_sm_set_state(ln, bfa_pport_ln_sm_up); + case BFA_FCPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up); break; default: - bfa_sm_fault(ln->pport->bfa, event); + bfa_sm_fault(ln->fcport->bfa, event); } } @@ -752,23 +766,23 @@ bfa_pport_ln_sm_up_nf(struct bfa_pport_ln_s *ln, * Link state is waiting for up notification and there is a pending down */ static void -bfa_pport_ln_sm_up_dn_nf(struct bfa_pport_ln_s *ln, - enum bfa_pport_ln_sm_event event) +bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) { - bfa_trc(ln->pport->bfa, event); + bfa_trc(ln->fcport->bfa, event); switch (event) { - case BFA_PPORT_LN_SM_LINKUP: - bfa_sm_set_state(ln, bfa_pport_ln_sm_up_dn_up_nf); + case BFA_FCPORT_LN_SM_LINKUP: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf); break; - case BFA_PPORT_LN_SM_NOTIFICATION: - bfa_sm_set_state(ln, bfa_pport_ln_sm_dn_nf); - bfa_pport_queue_cb(ln, BFA_PPORT_LINKDOWN); + case BFA_FCPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf); + bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN); break; default: - bfa_sm_fault(ln->pport->bfa, event); + bfa_sm_fault(ln->fcport->bfa, event); } } @@ -776,23 +790,23 @@ bfa_pport_ln_sm_up_dn_nf(struct bfa_pport_ln_s *ln, * Link state is waiting for up notification and there are pending down and up */ static void -bfa_pport_ln_sm_up_dn_up_nf(struct bfa_pport_ln_s *ln, - enum bfa_pport_ln_sm_event event) +bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, + enum bfa_fcport_ln_sm_event event) { - bfa_trc(ln->pport->bfa, event); + bfa_trc(ln->fcport->bfa, event); switch (event) { - case BFA_PPORT_LN_SM_LINKDOWN: - bfa_sm_set_state(ln, bfa_pport_ln_sm_up_dn_nf); + case BFA_FCPORT_LN_SM_LINKDOWN: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf); break; - case BFA_PPORT_LN_SM_NOTIFICATION: - bfa_sm_set_state(ln, bfa_pport_ln_sm_dn_up_nf); - bfa_pport_queue_cb(ln, BFA_PPORT_LINKDOWN); + case BFA_FCPORT_LN_SM_NOTIFICATION: + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf); + bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN); break; default: - bfa_sm_fault(ln->pport->bfa, event); + bfa_sm_fault(ln->fcport->bfa, event); } } @@ -801,36 +815,40 @@ bfa_pport_ln_sm_up_dn_up_nf(struct bfa_pport_ln_s *ln, */ static void -__bfa_cb_port_event(void *cbarg, bfa_boolean_t complete) +__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete) { - struct bfa_pport_ln_s *ln = cbarg; + struct bfa_fcport_ln_s *ln = cbarg; if (complete) - ln->pport->event_cbfn(ln->pport->event_cbarg, ln->ln_event); + ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event); else - bfa_sm_send_event(ln, 
BFA_PPORT_LN_SM_NOTIFICATION); + bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); } -#define PPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), \ +#define PPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \ + BFA_CACHELINE_SZ)) + +#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \ BFA_CACHELINE_SZ)) static void -bfa_pport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, +bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len) { *dm_len += PPORT_STATS_DMA_SZ; + *dm_len += PPORT_STATS_DMA_SZ; } static void -bfa_pport_qresume(void *cbarg) +bfa_fcport_qresume(void *cbarg) { - struct bfa_pport_s *port = cbarg; + struct bfa_fcport_s *port = cbarg; - bfa_sm_send_event(port, BFA_PPORT_SM_QRESUME); + bfa_sm_send_event(port, BFA_FCPORT_SM_QRESUME); } static void -bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo) +bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo) { u8 *dm_kva; u64 dm_pa; @@ -838,13 +856,22 @@ bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo) dm_kva = bfa_meminfo_dma_virt(meminfo); dm_pa = bfa_meminfo_dma_phys(meminfo); - pport->stats_kva = dm_kva; - pport->stats_pa = dm_pa; - pport->stats = (union bfa_pport_stats_u *)dm_kva; + fcport->stats_kva = dm_kva; + fcport->stats_pa = dm_pa; + fcport->stats = (union bfa_pport_stats_u *)dm_kva; dm_kva += PPORT_STATS_DMA_SZ; dm_pa += PPORT_STATS_DMA_SZ; + /* FC port stats */ + + fcport->fcport_stats_kva = dm_kva; + fcport->fcport_stats_pa = dm_pa; + fcport->fcport_stats = (union bfa_fcport_stats_u *) dm_kva; + + dm_kva += FCPORT_STATS_DMA_SZ; + dm_pa += FCPORT_STATS_DMA_SZ; + bfa_meminfo_dma_virt(meminfo) = dm_kva; bfa_meminfo_dma_phys(meminfo) = dm_pa; } @@ -853,21 +880,21 @@ bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo) * Memory initialization. */ static void -bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, +bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); - struct bfa_pport_cfg_s *port_cfg = &pport->cfg; - struct bfa_pport_ln_s *ln = &pport->ln; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + struct bfa_pport_cfg_s *port_cfg = &fcport->cfg; + struct bfa_fcport_ln_s *ln = &fcport->ln; - bfa_os_memset(pport, 0, sizeof(struct bfa_pport_s)); - pport->bfa = bfa; - ln->pport = pport; + bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s)); + fcport->bfa = bfa; + ln->fcport = fcport; - bfa_pport_mem_claim(pport, meminfo); + bfa_fcport_mem_claim(fcport, meminfo); - bfa_sm_set_state(pport, bfa_pport_sm_uninit); - bfa_sm_set_state(ln, bfa_pport_ln_sm_dn); + bfa_sm_set_state(fcport, bfa_fcport_sm_uninit); + bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); /** * initialize and set default configuration @@ -879,30 +906,30 @@ bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS; - bfa_reqq_winit(&pport->reqq_wait, bfa_pport_qresume, pport); + bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport); } static void -bfa_pport_initdone(struct bfa_s *bfa) +bfa_fcport_initdone(struct bfa_s *bfa) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); /** * Initialize port attributes from IOC hardware data. 
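/*
 * Illustrative sketch, not part of this patch: bfa_fcport_mem_claim()
 * above carves one pre-allocated DMA region into cacheline-rounded
 * chunks, first the legacy pport stats block and now an additional
 * fcport stats block, advancing the virtual and physical cursors in
 * lock step.  ROUNDUP, CACHELINE_SZ and the chunk sizes are hypothetical
 * stand-ins for BFA_ROUNDUP/BFA_CACHELINE_SZ and the real union sizes.
 */
#include <stdint.h>
#include <stdio.h>

#define CACHELINE_SZ	64
#define ROUNDUP(x, a)	((((x) + (a) - 1) / (a)) * (a))

struct meminfo {
	uint8_t		*dma_kva;	/* kernel virtual cursor */
	uint64_t	dma_pa;		/* DMA/physical cursor   */
};

/* Reserve 'len' bytes (rounded up to a cacheline) and advance both cursors. */
static uint8_t *claim_chunk(struct meminfo *mi, size_t len, uint64_t *pa)
{
	size_t sz = ROUNDUP(len, CACHELINE_SZ);
	uint8_t *kva = mi->dma_kva;

	*pa = mi->dma_pa;
	mi->dma_kva += sz;
	mi->dma_pa += sz;
	return kva;
}

int main(void)
{
	static uint8_t region[4096];
	struct meminfo mi = { region, 0x10000000ULL };
	uint64_t pport_pa, fcport_pa;
	uint8_t *pport_stats = claim_chunk(&mi, 900, &pport_pa);
	uint8_t *fcport_stats = claim_chunk(&mi, 1200, &fcport_pa);

	printf("pport stats at +%td (pa 0x%llx), fcport stats at +%td (pa 0x%llx)\n",
	       pport_stats - region, (unsigned long long)pport_pa,
	       fcport_stats - region, (unsigned long long)fcport_pa);
	return 0;
}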
*/ - bfa_pport_set_wwns(pport); - if (pport->cfg.maxfrsize == 0) - pport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc); - pport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); - pport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); + bfa_fcport_set_wwns(fcport); + if (fcport->cfg.maxfrsize == 0) + fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc); + fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); + fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); - bfa_assert(pport->cfg.maxfrsize); - bfa_assert(pport->cfg.rx_bbcredit); - bfa_assert(pport->speed_sup); + bfa_assert(fcport->cfg.maxfrsize); + bfa_assert(fcport->cfg.rx_bbcredit); + bfa_assert(fcport->speed_sup); } static void -bfa_pport_detach(struct bfa_s *bfa) +bfa_fcport_detach(struct bfa_s *bfa) { } @@ -910,62 +937,63 @@ bfa_pport_detach(struct bfa_s *bfa) * Called when IOC is ready. */ static void -bfa_pport_start(struct bfa_s *bfa) +bfa_fcport_start(struct bfa_s *bfa) { - bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_START); + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START); } /** * Called before IOC is stopped. */ static void -bfa_pport_stop(struct bfa_s *bfa) +bfa_fcport_stop(struct bfa_s *bfa) { - bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_STOP); + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP); } /** * Called when IOC failure is detected. */ static void -bfa_pport_iocdisable(struct bfa_s *bfa) +bfa_fcport_iocdisable(struct bfa_s *bfa) { - bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_HWFAIL); + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_HWFAIL); } static void -bfa_pport_update_linkinfo(struct bfa_pport_s *pport) +bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) { - struct bfi_pport_event_s *pevent = pport->event_arg.i2hmsg.event; + struct bfi_pport_event_s *pevent = fcport->event_arg.i2hmsg.event; - pport->speed = pevent->link_state.speed; - pport->topology = pevent->link_state.topology; + fcport->speed = pevent->link_state.speed; + fcport->topology = pevent->link_state.topology; - if (pport->topology == BFA_PPORT_TOPOLOGY_LOOP) - pport->myalpa = pevent->link_state.tl.loop_info.myalpa; + if (fcport->topology == BFA_PPORT_TOPOLOGY_LOOP) + fcport->myalpa = + pevent->link_state.tl.loop_info.myalpa; /* * QoS Details */ - bfa_os_assign(pport->qos_attr, pevent->link_state.qos_attr); - bfa_os_assign(pport->qos_vc_attr, pevent->link_state.qos_vc_attr); + bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr); + bfa_os_assign(fcport->qos_vc_attr, pevent->link_state.qos_vc_attr); - bfa_trc(pport->bfa, pport->speed); - bfa_trc(pport->bfa, pport->topology); + bfa_trc(fcport->bfa, fcport->speed); + bfa_trc(fcport->bfa, fcport->topology); } static void -bfa_pport_reset_linkinfo(struct bfa_pport_s *pport) +bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport) { - pport->speed = BFA_PPORT_SPEED_UNKNOWN; - pport->topology = BFA_PPORT_TOPOLOGY_NONE; + fcport->speed = BFA_PPORT_SPEED_UNKNOWN; + fcport->topology = BFA_PPORT_TOPOLOGY_NONE; } /** * Send port enable message to firmware. */ static bfa_boolean_t -bfa_pport_send_enable(struct bfa_pport_s *port) +bfa_fcport_send_enable(struct bfa_fcport_s *fcport) { struct bfi_pport_enable_req_s *m; @@ -973,32 +1001,34 @@ bfa_pport_send_enable(struct bfa_pport_s *port) * Increment message tag before queue check, so that responses to old * requests are discarded. 
*/ - port->msgtag++; + fcport->msgtag++; /** * check for room in queue to send request now */ - m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); + m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); if (!m) { - bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait); + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->reqq_wait); return BFA_FALSE; } bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_ENABLE_REQ, - bfa_lpuid(port->bfa)); - m->nwwn = port->nwwn; - m->pwwn = port->pwwn; - m->port_cfg = port->cfg; - m->msgtag = port->msgtag; - m->port_cfg.maxfrsize = bfa_os_htons(port->cfg.maxfrsize); - bfa_dma_be_addr_set(m->stats_dma_addr, port->stats_pa); - bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_lo); - bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_hi); + bfa_lpuid(fcport->bfa)); + m->nwwn = fcport->nwwn; + m->pwwn = fcport->pwwn; + m->port_cfg = fcport->cfg; + m->msgtag = fcport->msgtag; + m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize); + bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa); + bfa_dma_be_addr_set(m->fcport_stats_dma_addr, fcport->fcport_stats_pa); + bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); + bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); /** * queue I/O message to firmware */ - bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); return BFA_TRUE; } @@ -1006,7 +1036,7 @@ bfa_pport_send_enable(struct bfa_pport_s *port) * Send port disable message to firmware. */ static bfa_boolean_t -bfa_pport_send_disable(struct bfa_pport_s *port) +bfa_fcport_send_disable(struct bfa_fcport_s *fcport) { bfi_pport_disable_req_t *m; @@ -1014,63 +1044,64 @@ bfa_pport_send_disable(struct bfa_pport_s *port) * Increment message tag before queue check, so that responses to old * requests are discarded. 
*/ - port->msgtag++; + fcport->msgtag++; /** * check for room in queue to send request now */ - m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); + m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); if (!m) { - bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait); + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->reqq_wait); return BFA_FALSE; } bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_DISABLE_REQ, - bfa_lpuid(port->bfa)); - m->msgtag = port->msgtag; + bfa_lpuid(fcport->bfa)); + m->msgtag = fcport->msgtag; /** * queue I/O message to firmware */ - bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); return BFA_TRUE; } static void -bfa_pport_set_wwns(struct bfa_pport_s *port) +bfa_fcport_set_wwns(struct bfa_fcport_s *fcport) { - port->pwwn = bfa_ioc_get_pwwn(&port->bfa->ioc); - port->nwwn = bfa_ioc_get_nwwn(&port->bfa->ioc); + fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc); + fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc); - bfa_trc(port->bfa, port->pwwn); - bfa_trc(port->bfa, port->nwwn); + bfa_trc(fcport->bfa, fcport->pwwn); + bfa_trc(fcport->bfa, fcport->nwwn); } static void -bfa_port_send_txcredit(void *port_cbarg) +bfa_fcport_send_txcredit(void *port_cbarg) { - struct bfa_pport_s *port = port_cbarg; + struct bfa_fcport_s *fcport = port_cbarg; struct bfi_pport_set_svc_params_req_s *m; /** * check for room in queue to send request now */ - m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); + m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); if (!m) { - bfa_trc(port->bfa, port->cfg.tx_bbcredit); + bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit); return; } bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_SET_SVC_PARAMS_REQ, - bfa_lpuid(port->bfa)); - m->tx_bbcredit = bfa_os_htons((u16) port->cfg.tx_bbcredit); + bfa_lpuid(fcport->bfa)); + m->tx_bbcredit = bfa_os_htons((u16) fcport->cfg.tx_bbcredit); /** * queue I/O message to firmware */ - bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); } @@ -1083,32 +1114,32 @@ bfa_port_send_txcredit(void *port_cbarg) * Firmware message handler. 
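/*
 * Illustrative sketch, not part of this patch: send_enable, send_disable
 * and the stats requests above all follow the same request-queue
 * discipline: bump a message tag so stale firmware responses can be
 * recognised and dropped, try to take a free request-queue entry, and if
 * the queue is full park a wait element so the send is retried when
 * space frees up (the QRESUME event).  reqq_next and the resume hook are
 * hypothetical stand-ins for bfa_reqq_next/bfa_reqq_wait.
 */
#include <stdbool.h>
#include <stdio.h>

struct reqq {
	int	free_slots;		/* entries left in the queue */
	void	(*resume)(void *arg);	/* parked sender, if any     */
	void	*resume_arg;
};

struct port {
	struct reqq	*q;
	unsigned int	msgtag;		/* identifies stale replies  */
};

/* Try to reserve a queue slot; park 'resume' if the queue is full. */
static bool reqq_next(struct reqq *q, void (*resume)(void *), void *arg)
{
	if (q->free_slots == 0) {
		q->resume = resume;
		q->resume_arg = arg;
		return false;
	}
	q->free_slots--;
	return true;
}

static void send_enable(void *arg)
{
	struct port *p = arg;

	p->msgtag++;	/* responses carrying older tags will be ignored */
	if (!reqq_next(p->q, send_enable, p)) {
		printf("queue full, waiting (tag %u)\n", p->msgtag);
		return;
	}
	printf("enable request queued, tag %u\n", p->msgtag);
}

int main(void)
{
	struct reqq q = { .free_slots = 0 };
	struct port p = { .q = &q };

	send_enable(&p);		/* queue full: sender is parked  */
	q.free_slots = 1;		/* firmware frees an entry       */
	if (q.resume)
		q.resume(q.resume_arg);	/* QRESUME: the send is retried  */
	return 0;
}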
*/ void -bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) +bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); union bfi_pport_i2h_msg_u i2hmsg; i2hmsg.msg = msg; - pport->event_arg.i2hmsg = i2hmsg; + fcport->event_arg.i2hmsg = i2hmsg; switch (msg->mhdr.msg_id) { case BFI_PPORT_I2H_ENABLE_RSP: - if (pport->msgtag == i2hmsg.enable_rsp->msgtag) - bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP); + if (fcport->msgtag == i2hmsg.enable_rsp->msgtag) + bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); break; case BFI_PPORT_I2H_DISABLE_RSP: - if (pport->msgtag == i2hmsg.enable_rsp->msgtag) - bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP); + if (fcport->msgtag == i2hmsg.enable_rsp->msgtag) + bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); break; case BFI_PPORT_I2H_EVENT: switch (i2hmsg.event->link_state.linkstate) { case BFA_PPORT_LINKUP: - bfa_sm_send_event(pport, BFA_PPORT_SM_LINKUP); + bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP); break; case BFA_PPORT_LINKDOWN: - bfa_sm_send_event(pport, BFA_PPORT_SM_LINKDOWN); + bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN); break; case BFA_PPORT_TRUNK_LINKDOWN: /** todo: event notification */ @@ -1121,32 +1152,63 @@ bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) /* * check for timer pop before processing the rsp */ - if (pport->stats_busy == BFA_FALSE - || pport->stats_status == BFA_STATUS_ETIMER) + if (fcport->stats_busy == BFA_FALSE + || fcport->stats_status == BFA_STATUS_ETIMER) break; - bfa_timer_stop(&pport->timer); - pport->stats_status = i2hmsg.getstats_rsp->status; - bfa_cb_queue(pport->bfa, &pport->hcb_qe, __bfa_cb_port_stats, - pport); + bfa_timer_stop(&fcport->timer); + fcport->stats_status = i2hmsg.getstats_rsp->status; + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_port_stats, + fcport); break; case BFI_PPORT_I2H_CLEAR_STATS_RSP: case BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP: /* * check for timer pop before processing the rsp */ - if (pport->stats_busy == BFA_FALSE - || pport->stats_status == BFA_STATUS_ETIMER) + if (fcport->stats_busy == BFA_FALSE + || fcport->stats_status == BFA_STATUS_ETIMER) break; - bfa_timer_stop(&pport->timer); - pport->stats_status = BFA_STATUS_OK; - bfa_cb_queue(pport->bfa, &pport->hcb_qe, - __bfa_cb_port_stats_clr, pport); + bfa_timer_stop(&fcport->timer); + fcport->stats_status = BFA_STATUS_OK; + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, + __bfa_cb_port_stats_clr, fcport); + break; + + case BFI_FCPORT_I2H_GET_STATS_RSP: + /* + * check for timer pop before processing the rsp + */ + if (fcport->stats_busy == BFA_FALSE || + fcport->stats_status == BFA_STATUS_ETIMER) { + break; + } + + bfa_timer_stop(&fcport->timer); + fcport->stats_status = i2hmsg.getstats_rsp->status; + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, + __bfa_cb_fcport_stats, fcport); + break; + + case BFI_FCPORT_I2H_CLEAR_STATS_RSP: + /* + * check for timer pop before processing the rsp + */ + if (fcport->stats_busy == BFA_FALSE || + fcport->stats_status == BFA_STATUS_ETIMER) { + break; + } + + bfa_timer_stop(&fcport->timer); + fcport->stats_status = BFA_STATUS_OK; + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, + __bfa_cb_fcport_stats_clr, fcport); break; default: bfa_assert(0); + break; } } @@ -1160,35 +1222,35 @@ bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) * Registered callback for port events. 
*/ void -bfa_pport_event_register(struct bfa_s *bfa, +bfa_fcport_event_register(struct bfa_s *bfa, void (*cbfn) (void *cbarg, bfa_pport_event_t event), void *cbarg) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - pport->event_cbfn = cbfn; - pport->event_cbarg = cbarg; + fcport->event_cbfn = cbfn; + fcport->event_cbarg = cbarg; } bfa_status_t -bfa_pport_enable(struct bfa_s *bfa) +bfa_fcport_enable(struct bfa_s *bfa) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - if (pport->diag_busy) + if (fcport->diag_busy) return BFA_STATUS_DIAG_BUSY; else if (bfa_sm_cmp_state - (BFA_PORT_MOD(bfa), bfa_pport_sm_disabling_qwait)) + (BFA_FCPORT_MOD(bfa), bfa_fcport_sm_disabling_qwait)) return BFA_STATUS_DEVBUSY; - bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE); + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE); return BFA_STATUS_OK; } bfa_status_t -bfa_pport_disable(struct bfa_s *bfa) +bfa_fcport_disable(struct bfa_s *bfa) { - bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_DISABLE); + bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE); return BFA_STATUS_OK; } @@ -1196,18 +1258,18 @@ bfa_pport_disable(struct bfa_s *bfa) * Configure port speed. */ bfa_status_t -bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) +bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, speed); - if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > pport->speed_sup)) { - bfa_trc(bfa, pport->speed_sup); + if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > fcport->speed_sup)) { + bfa_trc(bfa, fcport->speed_sup); return BFA_STATUS_UNSUPP_SPEED; } - pport->cfg.speed = speed; + fcport->cfg.speed = speed; return BFA_STATUS_OK; } @@ -1216,23 +1278,23 @@ bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) * Get current speed. */ enum bfa_pport_speed -bfa_pport_get_speed(struct bfa_s *bfa) +bfa_fcport_get_speed(struct bfa_s *bfa) { - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - return port->speed; + return fcport->speed; } /** * Configure port topology. */ bfa_status_t -bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology) +bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, topology); - bfa_trc(bfa, pport->cfg.topology); + bfa_trc(bfa, fcport->cfg.topology); switch (topology) { case BFA_PPORT_TOPOLOGY_P2P: @@ -1244,7 +1306,7 @@ bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology) return BFA_STATUS_EINVAL; } - pport->cfg.topology = topology; + fcport->cfg.topology = topology; return BFA_STATUS_OK; } @@ -1252,64 +1314,64 @@ bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology) * Get current topology. 
*/ enum bfa_pport_topology -bfa_pport_get_topology(struct bfa_s *bfa) +bfa_fcport_get_topology(struct bfa_s *bfa) { - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - return port->topology; + return fcport->topology; } bfa_status_t -bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) +bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, alpa); - bfa_trc(bfa, pport->cfg.cfg_hardalpa); - bfa_trc(bfa, pport->cfg.hardalpa); + bfa_trc(bfa, fcport->cfg.cfg_hardalpa); + bfa_trc(bfa, fcport->cfg.hardalpa); - pport->cfg.cfg_hardalpa = BFA_TRUE; - pport->cfg.hardalpa = alpa; + fcport->cfg.cfg_hardalpa = BFA_TRUE; + fcport->cfg.hardalpa = alpa; return BFA_STATUS_OK; } bfa_status_t -bfa_pport_clr_hardalpa(struct bfa_s *bfa) +bfa_fcport_clr_hardalpa(struct bfa_s *bfa) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - bfa_trc(bfa, pport->cfg.cfg_hardalpa); - bfa_trc(bfa, pport->cfg.hardalpa); + bfa_trc(bfa, fcport->cfg.cfg_hardalpa); + bfa_trc(bfa, fcport->cfg.hardalpa); - pport->cfg.cfg_hardalpa = BFA_FALSE; + fcport->cfg.cfg_hardalpa = BFA_FALSE; return BFA_STATUS_OK; } bfa_boolean_t -bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa) +bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa) { - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - *alpa = port->cfg.hardalpa; - return port->cfg.cfg_hardalpa; + *alpa = fcport->cfg.hardalpa; + return fcport->cfg.cfg_hardalpa; } u8 -bfa_pport_get_myalpa(struct bfa_s *bfa) +bfa_fcport_get_myalpa(struct bfa_s *bfa) { - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - return port->myalpa; + return fcport->myalpa; } bfa_status_t -bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize) +bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, maxfrsize); - bfa_trc(bfa, pport->cfg.maxfrsize); + bfa_trc(bfa, fcport->cfg.maxfrsize); /* * with in range @@ -1323,41 +1385,41 @@ bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize) if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1))) return BFA_STATUS_INVLD_DFSZ; - pport->cfg.maxfrsize = maxfrsize; + fcport->cfg.maxfrsize = maxfrsize; return BFA_STATUS_OK; } u16 -bfa_pport_get_maxfrsize(struct bfa_s *bfa) +bfa_fcport_get_maxfrsize(struct bfa_s *bfa) { - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - return port->cfg.maxfrsize; + return fcport->cfg.maxfrsize; } u32 -bfa_pport_mypid(struct bfa_s *bfa) +bfa_fcport_mypid(struct bfa_s *bfa) { - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - return port->mypid; + return fcport->mypid; } u8 -bfa_pport_get_rx_bbcredit(struct bfa_s *bfa) +bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa) { - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - return port->cfg.rx_bbcredit; + return fcport->cfg.rx_bbcredit; } void -bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) +bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) { - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - port->cfg.tx_bbcredit = (u8) 
tx_bbcredit; - bfa_port_send_txcredit(port); + fcport->cfg.tx_bbcredit = (u8) tx_bbcredit; + bfa_fcport_send_txcredit(fcport); } /** @@ -1365,78 +1427,79 @@ bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) */ wwn_t -bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node) +bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); if (node) - return pport->nwwn; + return fcport->nwwn; else - return pport->pwwn; + return fcport->pwwn; } void -bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr) +bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s)); - attr->nwwn = pport->nwwn; - attr->pwwn = pport->pwwn; + attr->nwwn = fcport->nwwn; + attr->pwwn = fcport->pwwn; - bfa_os_memcpy(&attr->pport_cfg, &pport->cfg, + bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg, sizeof(struct bfa_pport_cfg_s)); /* * speed attributes */ - attr->pport_cfg.speed = pport->cfg.speed; - attr->speed_supported = pport->speed_sup; - attr->speed = pport->speed; + attr->pport_cfg.speed = fcport->cfg.speed; + attr->speed_supported = fcport->speed_sup; + attr->speed = fcport->speed; attr->cos_supported = FC_CLASS_3; /* * topology attributes */ - attr->pport_cfg.topology = pport->cfg.topology; - attr->topology = pport->topology; + attr->pport_cfg.topology = fcport->cfg.topology; + attr->topology = fcport->topology; /* * beacon attributes */ - attr->beacon = pport->beacon; - attr->link_e2e_beacon = pport->link_e2e_beacon; - attr->plog_enabled = bfa_plog_get_setting(pport->bfa->plog); + attr->beacon = fcport->beacon; + attr->link_e2e_beacon = fcport->link_e2e_beacon; + attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog); attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa); - attr->port_state = bfa_sm_to_state(hal_pport_sm_table, pport->sm); - if (bfa_ioc_is_disabled(&pport->bfa->ioc)) + attr->port_state = bfa_sm_to_state(hal_pport_sm_table, fcport->sm); + if (bfa_ioc_is_disabled(&fcport->bfa->ioc)) attr->port_state = BFA_PPORT_ST_IOCDIS; - else if (bfa_ioc_fw_mismatch(&pport->bfa->ioc)) + else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc)) attr->port_state = BFA_PPORT_ST_FWMISMATCH; } static void bfa_port_stats_query(void *cbarg) { - struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg; + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg; bfi_pport_get_stats_req_t *msg; - msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); + msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); if (!msg) { - port->stats_qfull = BFA_TRUE; - bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_query, - port); - bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait); + fcport->stats_qfull = BFA_TRUE; + bfa_reqq_winit(&fcport->stats_reqq_wait, bfa_port_stats_query, + fcport); + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->stats_reqq_wait); return; } - port->stats_qfull = BFA_FALSE; + fcport->stats_qfull = BFA_FALSE; bfa_os_memset(msg, 0, sizeof(bfi_pport_get_stats_req_t)); bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_GET_STATS_REQ, - bfa_lpuid(port->bfa)); - bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); + bfa_lpuid(fcport->bfa)); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); return; } @@ -1444,65 +1507,111 @@ bfa_port_stats_query(void *cbarg) static void 
bfa_port_stats_clear(void *cbarg) { - struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg; + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg; bfi_pport_clear_stats_req_t *msg; - msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); + msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); if (!msg) { - port->stats_qfull = BFA_TRUE; - bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_clear, - port); - bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait); + fcport->stats_qfull = BFA_TRUE; + bfa_reqq_winit(&fcport->stats_reqq_wait, bfa_port_stats_clear, + fcport); + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->stats_reqq_wait); return; } - port->stats_qfull = BFA_FALSE; + fcport->stats_qfull = BFA_FALSE; bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_stats_req_t)); bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_STATS_REQ, - bfa_lpuid(port->bfa)); - bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); + bfa_lpuid(fcport->bfa)); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); return; } +static void +bfa_fcport_stats_query(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + bfi_pport_get_stats_req_t *msg; + + msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); + + if (!msg) { + fcport->stats_qfull = BFA_TRUE; + bfa_reqq_winit(&fcport->stats_reqq_wait, + bfa_fcport_stats_query, fcport); + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->stats_reqq_wait); + return; + } + fcport->stats_qfull = BFA_FALSE; + + bfa_os_memset(msg, 0, sizeof(bfi_pport_get_stats_req_t)); + bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_FCPORT_H2I_GET_STATS_REQ, + bfa_lpuid(fcport->bfa)); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); +} + +static void +bfa_fcport_stats_clear(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + bfi_pport_clear_stats_req_t *msg; + + msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); + + if (!msg) { + fcport->stats_qfull = BFA_TRUE; + bfa_reqq_winit(&fcport->stats_reqq_wait, + bfa_fcport_stats_clear, fcport); + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->stats_reqq_wait); + return; + } + fcport->stats_qfull = BFA_FALSE; + + bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_stats_req_t)); + bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_FCPORT_H2I_CLEAR_STATS_REQ, + bfa_lpuid(fcport->bfa)); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); +} + static void bfa_port_qos_stats_clear(void *cbarg) { - struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg; + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg; bfi_pport_clear_qos_stats_req_t *msg; - msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT); + msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); if (!msg) { - port->stats_qfull = BFA_TRUE; - bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_qos_stats_clear, - port); - bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait); + fcport->stats_qfull = BFA_TRUE; + bfa_reqq_winit(&fcport->stats_reqq_wait, + bfa_port_qos_stats_clear, fcport); + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->stats_reqq_wait); return; } - port->stats_qfull = BFA_FALSE; + fcport->stats_qfull = BFA_FALSE; bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_qos_stats_req_t)); bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ, - bfa_lpuid(port->bfa)); - bfa_reqq_produce(port->bfa, BFA_REQQ_PORT); + bfa_lpuid(fcport->bfa)); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); return; } static void -bfa_pport_stats_swap(union bfa_pport_stats_u *d, union bfa_pport_stats_u *s) +bfa_fcport_stats_swap(union bfa_fcport_stats_u *d, union 
bfa_fcport_stats_u *s) { - u32 *dip = (u32 *) d; - u32 *sip = (u32 *) s; + u32 *dip = (u32 *) d; + u32 *sip = (u32 *) s; int i; - /* - * Do 64 bit fields swap first - */ - for (i = 0; - i < - ((sizeof(union bfa_pport_stats_u) - - sizeof(struct bfa_qos_stats_s)) / sizeof(u32)); i = i + 2) { + /* Do 64 bit fields swap first */ + for (i = 0; i < ((sizeof(union bfa_fcport_stats_u) - + sizeof(struct bfa_qos_stats_s))/sizeof(u32)); i = i + 2) { #ifdef __BIGENDIAN dip[i] = bfa_os_ntohl(sip[i]); dip[i + 1] = bfa_os_ntohl(sip[i + 1]); @@ -1512,56 +1621,88 @@ bfa_pport_stats_swap(union bfa_pport_stats_u *d, union bfa_pport_stats_u *s) #endif } - /* - * Now swap the 32 bit fields - */ - for (; i < (sizeof(union bfa_pport_stats_u) / sizeof(u32)); ++i) + /* Now swap the 32 bit fields */ + for (; i < (sizeof(union bfa_fcport_stats_u)/sizeof(u32)); ++i) + dip[i] = bfa_os_ntohl(sip[i]); +} + +static void +bfa_port_stats_swap(union bfa_pport_stats_u *d, union bfa_pport_stats_u *s) +{ + u32 *dip = (u32 *) d; + u32 *sip = (u32 *) s; + int i; + + /* Do 64 bit fields swap first */ + for (i = 0; i < (sizeof(union bfa_pport_stats_u) / sizeof(u32)); + i = i + 2) { +#ifdef __BIGENDIAN dip[i] = bfa_os_ntohl(sip[i]); + dip[i + 1] = bfa_os_ntohl(sip[i + 1]); +#else + dip[i] = bfa_os_ntohl(sip[i + 1]); + dip[i + 1] = bfa_os_ntohl(sip[i]); +#endif + } } static void __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete) { - struct bfa_pport_s *port = cbarg; + struct bfa_fcport_s *fcport = cbarg; if (complete) { - port->stats_cbfn(port->stats_cbarg, port->stats_status); + fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); } else { - port->stats_busy = BFA_FALSE; - port->stats_status = BFA_STATUS_OK; + fcport->stats_busy = BFA_FALSE; + fcport->stats_status = BFA_STATUS_OK; + } +} + +static void +__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_fcport_s *fcport = cbarg; + + if (complete) { + fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); + } else { + fcport->stats_busy = BFA_FALSE; + fcport->stats_status = BFA_STATUS_OK; } } static void bfa_port_stats_clr_timeout(void *cbarg) { - struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg; + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg; - bfa_trc(port->bfa, port->stats_qfull); + bfa_trc(fcport->bfa, fcport->stats_qfull); - if (port->stats_qfull) { - bfa_reqq_wcancel(&port->stats_reqq_wait); - port->stats_qfull = BFA_FALSE; + if (fcport->stats_qfull) { + bfa_reqq_wcancel(&fcport->stats_reqq_wait); + fcport->stats_qfull = BFA_FALSE; } - port->stats_status = BFA_STATUS_ETIMER; - bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats_clr, port); + fcport->stats_status = BFA_STATUS_ETIMER; + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, + __bfa_cb_port_stats_clr, fcport); } static void -bfa_pport_callback(struct bfa_pport_s *pport, enum bfa_pport_linkstate event) +bfa_fcport_callback(struct bfa_fcport_s *fcport, enum bfa_pport_linkstate event) { - if (pport->bfa->fcs) { - pport->event_cbfn(pport->event_cbarg, event); + if (fcport->bfa->fcs) { + fcport->event_cbfn(fcport->event_cbarg, event); return; } switch (event) { case BFA_PPORT_LINKUP: - bfa_sm_send_event(&pport->ln, BFA_PPORT_LN_SM_LINKUP); + bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP); break; case BFA_PPORT_LINKDOWN: - bfa_sm_send_event(&pport->ln, BFA_PPORT_LN_SM_LINKDOWN); + bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN); break; default: bfa_assert(0); @@ -1569,41 +1710,92 @@ bfa_pport_callback(struct bfa_pport_s *pport, 
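/*
 * Illustrative sketch, not part of this patch: the stats_swap helpers
 * above convert firmware statistics (sent big-endian) to host order.
 * 64-bit counters arrive as two big-endian 32-bit words, so on a
 * little-endian host the two words must also be exchanged with each
 * other; plain 32-bit counters only need the byte swap.  __BIGENDIAN is
 * assumed to be defined on big-endian builds, as in the driver; the
 * counter counts used here are made up.
 */
#include <arpa/inet.h>	/* ntohl(), htonl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Swap n64 64-bit counters followed by n32 32-bit counters from src to dst. */
static void stats_swap(uint32_t *dst, const uint32_t *src, int n64, int n32)
{
	int i;

	for (i = 0; i < 2 * n64; i += 2) {
#ifdef __BIGENDIAN
		dst[i]     = ntohl(src[i]);
		dst[i + 1] = ntohl(src[i + 1]);
#else
		dst[i]     = ntohl(src[i + 1]);	/* low half of the u64  */
		dst[i + 1] = ntohl(src[i]);	/* high half of the u64 */
#endif
	}
	for (; i < 2 * n64 + n32; i++)
		dst[i] = ntohl(src[i]);
}

int main(void)
{
	/* One 64-bit counter 0x0000000100000002, one 32-bit counter 3. */
	uint32_t wire[3] = { htonl(1), htonl(2), htonl(3) };
	uint32_t host[3];
	uint64_t wide;

	stats_swap(host, wire, 1, 1);
	memcpy(&wide, host, sizeof(wide));
	printf("u64 counter = 0x%llx, u32 counter = %u\n",
	       (unsigned long long)wide, host[2]);
	return 0;
}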
enum bfa_pport_linkstate event) } static void -bfa_pport_queue_cb(struct bfa_pport_ln_s *ln, enum bfa_pport_linkstate event) +bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_pport_linkstate event) { ln->ln_event = event; - bfa_cb_queue(ln->pport->bfa, &ln->ln_qe, __bfa_cb_port_event, ln); + bfa_cb_queue(ln->fcport->bfa, &ln->ln_qe, __bfa_cb_fcport_event, ln); +} + +static void +bfa_fcport_stats_clr_timeout(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + + bfa_trc(fcport->bfa, fcport->stats_qfull); + + if (fcport->stats_qfull) { + bfa_reqq_wcancel(&fcport->stats_reqq_wait); + fcport->stats_qfull = BFA_FALSE; + } + + fcport->stats_status = BFA_STATUS_ETIMER; + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_clr, + fcport); } static void __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete) { - struct bfa_pport_s *port = cbarg; + struct bfa_fcport_s *fcport = cbarg; if (complete) { - if (port->stats_status == BFA_STATUS_OK) - bfa_pport_stats_swap(port->stats_ret, port->stats); - port->stats_cbfn(port->stats_cbarg, port->stats_status); + if (fcport->stats_status == BFA_STATUS_OK) + bfa_port_stats_swap(fcport->stats_ret, fcport->stats); + fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); } else { - port->stats_busy = BFA_FALSE; - port->stats_status = BFA_STATUS_OK; + fcport->stats_busy = BFA_FALSE; + fcport->stats_status = BFA_STATUS_OK; } } static void bfa_port_stats_timeout(void *cbarg) { - struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg; + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg; - bfa_trc(port->bfa, port->stats_qfull); + bfa_trc(fcport->bfa, fcport->stats_qfull); - if (port->stats_qfull) { - bfa_reqq_wcancel(&port->stats_reqq_wait); - port->stats_qfull = BFA_FALSE; + if (fcport->stats_qfull) { + bfa_reqq_wcancel(&fcport->stats_reqq_wait); + fcport->stats_qfull = BFA_FALSE; } - port->stats_status = BFA_STATUS_ETIMER; - bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats, port); + fcport->stats_status = BFA_STATUS_ETIMER; + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_port_stats, fcport); +} + +static void +__bfa_cb_fcport_stats(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_fcport_s *fcport = cbarg; + + if (complete) { + if (fcport->stats_status == BFA_STATUS_OK) { + bfa_fcport_stats_swap(fcport->fcport_stats_ret, + fcport->fcport_stats); + } + fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); + } else { + fcport->stats_busy = BFA_FALSE; + fcport->stats_status = BFA_STATUS_OK; + } +} + +static void +bfa_fcport_stats_timeout(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + + bfa_trc(fcport->bfa, fcport->stats_qfull); + + if (fcport->stats_qfull) { + bfa_reqq_wcancel(&fcport->stats_reqq_wait); + fcport->stats_qfull = BFA_FALSE; + } + + fcport->stats_status = BFA_STATUS_ETIMER; + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats, + fcport); } #define BFA_PORT_STATS_TOV 1000 @@ -1615,21 +1807,21 @@ bfa_status_t bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, bfa_cb_pport_t cbfn, void *cbarg) { - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - if (port->stats_busy) { - bfa_trc(bfa, port->stats_busy); + if (fcport->stats_busy) { + bfa_trc(bfa, fcport->stats_busy); return BFA_STATUS_DEVBUSY; } - port->stats_busy = BFA_TRUE; - port->stats_ret = stats; - port->stats_cbfn = cbfn; - port->stats_cbarg = cbarg; + fcport->stats_busy = BFA_TRUE; + 
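/*
 * Illustrative sketch, not part of this patch: the get/clear stats paths
 * above share a single stats_busy flag and timer.  A caller marks the
 * port busy, sends the request and starts a timeout; whichever of the
 * firmware response or the timeout fires first completes the caller's
 * callback, and the late arrival backs off when it sees the request is
 * no longer busy (or already marked ETIMER).  All names below are
 * hypothetical stand-ins, and the timer is driven by hand in main().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum status { STATUS_OK, STATUS_DEVBUSY, STATUS_ETIMER };

struct stats_ctx {
	bool		busy;
	enum status	status;
	void		(*cbfn)(void *cbarg, enum status s);
	void		*cbarg;
};

static enum status stats_request(struct stats_ctx *p,
				 void (*cbfn)(void *, enum status), void *arg)
{
	if (p->busy)
		return STATUS_DEVBUSY;	/* one outstanding request at a time */
	p->busy = true;
	p->status = STATUS_OK;
	p->cbfn = cbfn;
	p->cbarg = arg;
	/* ...queue the firmware message and start the timeout here... */
	return STATUS_OK;
}

/* Firmware response path: ignore it if the timeout already claimed it. */
static void stats_response(struct stats_ctx *p, enum status s)
{
	if (!p->busy || p->status == STATUS_ETIMER)
		return;
	p->busy = false;
	p->cbfn(p->cbarg, s);
}

/* Timeout path: mark the request timed out and complete the caller. */
static void stats_timeout(struct stats_ctx *p)
{
	if (!p->busy)
		return;
	p->status = STATUS_ETIMER;
	p->busy = false;
	p->cbfn(p->cbarg, p->status);
}

static void done(void *arg, enum status s)
{
	(void)arg;
	printf("stats request completed, status %d\n", s);
}

int main(void)
{
	struct stats_ctx port = { 0 };

	stats_request(&port, done, NULL);
	stats_response(&port, STATUS_OK);	/* response arrives in time  */
	stats_timeout(&port);			/* late timer pop is ignored */
	return 0;
}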
fcport->stats_ret = stats; + fcport->stats_cbfn = cbfn; + fcport->stats_cbarg = cbarg; - bfa_port_stats_query(port); + bfa_port_stats_query(fcport); - bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port, + bfa_timer_start(bfa, &fcport->timer, bfa_port_stats_timeout, fcport, BFA_PORT_STATS_TOV); return BFA_STATUS_OK; } @@ -1637,57 +1829,111 @@ bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, bfa_status_t bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) { - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + if (fcport->stats_busy) { + bfa_trc(bfa, fcport->stats_busy); + return BFA_STATUS_DEVBUSY; + } + + fcport->stats_busy = BFA_TRUE; + fcport->stats_cbfn = cbfn; + fcport->stats_cbarg = cbarg; + + bfa_port_stats_clear(fcport); + + bfa_timer_start(bfa, &fcport->timer, bfa_port_stats_clr_timeout, + fcport, BFA_PORT_STATS_TOV); + return BFA_STATUS_OK; +} + +/** + * @brief + * Fetch FCPort statistics. + * Todo TBD: sharing timer,stats_busy and other resources of fcport for now - + * ideally we want to create seperate ones for fcport once bfa_fcport_s is + * decided. + * + */ +bfa_status_t +bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, + bfa_cb_pport_t cbfn, void *cbarg) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - if (port->stats_busy) { - bfa_trc(bfa, port->stats_busy); + if (fcport->stats_busy) { + bfa_trc(bfa, fcport->stats_busy); return BFA_STATUS_DEVBUSY; } - port->stats_busy = BFA_TRUE; - port->stats_cbfn = cbfn; - port->stats_cbarg = cbarg; + fcport->stats_busy = BFA_TRUE; + fcport->fcport_stats_ret = stats; + fcport->stats_cbfn = cbfn; + fcport->stats_cbarg = cbarg; - bfa_port_stats_clear(port); + bfa_fcport_stats_query(fcport); - bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port, + bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_timeout, fcport, BFA_PORT_STATS_TOV); + + return BFA_STATUS_OK; +} + +bfa_status_t +bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) +{ + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + + if (fcport->stats_busy) { + bfa_trc(bfa, fcport->stats_busy); + return BFA_STATUS_DEVBUSY; + } + + fcport->stats_busy = BFA_TRUE; + fcport->stats_cbfn = cbfn; + fcport->stats_cbarg = cbarg; + + bfa_fcport_stats_clear(fcport); + + bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout, + fcport, BFA_PORT_STATS_TOV); + return BFA_STATUS_OK; } bfa_status_t -bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap) +bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, bitmap); - bfa_trc(bfa, pport->cfg.trunked); - bfa_trc(bfa, pport->cfg.trunk_ports); + bfa_trc(bfa, fcport->cfg.trunked); + bfa_trc(bfa, fcport->cfg.trunk_ports); if (!bitmap || (bitmap & (bitmap - 1))) return BFA_STATUS_EINVAL; - pport->cfg.trunked = BFA_TRUE; - pport->cfg.trunk_ports = bitmap; + fcport->cfg.trunked = BFA_TRUE; + fcport->cfg.trunk_ports = bitmap; return BFA_STATUS_OK; } void -bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr) +bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - qos_attr->state = bfa_os_ntohl(pport->qos_attr.state); - qos_attr->total_bb_cr = bfa_os_ntohl(pport->qos_attr.total_bb_cr); + qos_attr->state = 
bfa_os_ntohl(fcport->qos_attr.state); + qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr); } void -bfa_pport_qos_get_vc_attr(struct bfa_s *bfa, +bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa, struct bfa_qos_vc_attr_s *qos_vc_attr) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); - struct bfa_qos_vc_attr_s *bfa_vc_attr = &pport->qos_vc_attr; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr; u32 i = 0; qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count); @@ -1713,7 +1959,7 @@ bfa_pport_qos_get_vc_attr(struct bfa_s *bfa, * Fetch QoS Stats. */ bfa_status_t -bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, +bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, bfa_cb_pport_t cbfn, void *cbarg) { /* @@ -1723,23 +1969,23 @@ bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, } bfa_status_t -bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) +bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) { - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - if (port->stats_busy) { - bfa_trc(bfa, port->stats_busy); + if (fcport->stats_busy) { + bfa_trc(bfa, fcport->stats_busy); return BFA_STATUS_DEVBUSY; } - port->stats_busy = BFA_TRUE; - port->stats_cbfn = cbfn; - port->stats_cbarg = cbarg; + fcport->stats_busy = BFA_TRUE; + fcport->stats_cbfn = cbfn; + fcport->stats_cbarg = cbarg; - bfa_port_qos_stats_clear(port); + bfa_port_qos_stats_clear(fcport); - bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port, - BFA_PORT_STATS_TOV); + bfa_timer_start(bfa, &fcport->timer, bfa_port_stats_clr_timeout, + fcport, BFA_PORT_STATS_TOV); return BFA_STATUS_OK; } @@ -1747,82 +1993,82 @@ bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) * Fetch port attributes. */ bfa_status_t -bfa_pport_trunk_disable(struct bfa_s *bfa) +bfa_fcport_trunk_disable(struct bfa_s *bfa) { return BFA_STATUS_OK; } bfa_boolean_t -bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap) +bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap) { - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - *bitmap = port->cfg.trunk_ports; - return port->cfg.trunked; + *bitmap = fcport->cfg.trunk_ports; + return fcport->cfg.trunked; } bfa_boolean_t -bfa_pport_is_disabled(struct bfa_s *bfa) +bfa_fcport_is_disabled(struct bfa_s *bfa) { - struct bfa_pport_s *port = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - return bfa_sm_to_state(hal_pport_sm_table, port->sm) == + return bfa_sm_to_state(hal_pport_sm_table, fcport->sm) == BFA_PPORT_ST_DISABLED; } bfa_boolean_t -bfa_pport_is_ratelim(struct bfa_s *bfa) +bfa_fcport_is_ratelim(struct bfa_s *bfa) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - return pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE; + return fcport->cfg.ratelimit ? 
BFA_TRUE : BFA_FALSE; } void -bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off) +bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, on_off); - bfa_trc(bfa, pport->cfg.qos_enabled); + bfa_trc(bfa, fcport->cfg.qos_enabled); - pport->cfg.qos_enabled = on_off; + fcport->cfg.qos_enabled = on_off; } void -bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off) +bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, on_off); - bfa_trc(bfa, pport->cfg.ratelimit); + bfa_trc(bfa, fcport->cfg.ratelimit); - pport->cfg.ratelimit = on_off; - if (pport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN) - pport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS; + fcport->cfg.ratelimit = on_off; + if (fcport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN) + fcport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS; } /** * Configure default minimum ratelim speed */ bfa_status_t -bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) +bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, speed); /* * Auto and speeds greater than the supported speed, are invalid */ - if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > pport->speed_sup)) { - bfa_trc(bfa, pport->speed_sup); + if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > fcport->speed_sup)) { + bfa_trc(bfa, fcport->speed_sup); return BFA_STATUS_UNSUPP_SPEED; } - pport->cfg.trl_def_speed = speed; + fcport->cfg.trl_def_speed = speed; return BFA_STATUS_OK; } @@ -1831,45 +2077,45 @@ bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed) * Get default minimum ratelim speed */ enum bfa_pport_speed -bfa_pport_get_ratelim_speed(struct bfa_s *bfa) +bfa_fcport_get_ratelim_speed(struct bfa_s *bfa) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - bfa_trc(bfa, pport->cfg.trl_def_speed); - return pport->cfg.trl_def_speed; + bfa_trc(bfa, fcport->cfg.trl_def_speed); + return fcport->cfg.trl_def_speed; } void -bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status) +bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, status); - bfa_trc(bfa, pport->diag_busy); + bfa_trc(bfa, fcport->diag_busy); - pport->diag_busy = status; + fcport->diag_busy = status; } void -bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon, +bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon) { - struct bfa_pport_s *pport = BFA_PORT_MOD(bfa); + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); bfa_trc(bfa, beacon); bfa_trc(bfa, link_e2e_beacon); - bfa_trc(bfa, pport->beacon); - bfa_trc(bfa, pport->link_e2e_beacon); + bfa_trc(bfa, fcport->beacon); + bfa_trc(bfa, fcport->link_e2e_beacon); - pport->beacon = beacon; - pport->link_e2e_beacon = link_e2e_beacon; + fcport->beacon = beacon; + fcport->link_e2e_beacon = link_e2e_beacon; } bfa_boolean_t -bfa_pport_is_linkup(struct bfa_s *bfa) +bfa_fcport_is_linkup(struct bfa_s *bfa) { - return bfa_sm_cmp_state(BFA_PORT_MOD(bfa), bfa_pport_sm_linkup); + return bfa_sm_cmp_state(BFA_FCPORT_MOD(bfa), bfa_fcport_sm_linkup); } diff --git 
a/drivers/scsi/bfa/bfa_fcs_port.c b/drivers/scsi/bfa/bfa_fcs_port.c index 53808d0418a1..3c27788cd527 100644 --- a/drivers/scsi/bfa/bfa_fcs_port.c +++ b/drivers/scsi/bfa/bfa_fcs_port.c @@ -57,5 +57,5 @@ bfa_fcs_pport_event_handler(void *cbarg, bfa_pport_event_t event) void bfa_fcs_pport_attach(struct bfa_fcs_s *fcs) { - bfa_pport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, fcs); + bfa_fcport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, fcs); } diff --git a/drivers/scsi/bfa/bfa_module.c b/drivers/scsi/bfa/bfa_module.c index 32eda8e1ec65..a7fcc80c177e 100644 --- a/drivers/scsi/bfa/bfa_module.c +++ b/drivers/scsi/bfa/bfa_module.c @@ -24,7 +24,7 @@ */ struct bfa_module_s *hal_mods[] = { &hal_mod_sgpg, - &hal_mod_pport, + &hal_mod_fcport, &hal_mod_fcxp, &hal_mod_lps, &hal_mod_uf, @@ -45,7 +45,7 @@ bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { bfa_isr_unhandled, /* BFI_MC_DIAG */ bfa_isr_unhandled, /* BFI_MC_FLASH */ bfa_isr_unhandled, /* BFI_MC_CEE */ - bfa_pport_isr, /* BFI_MC_PORT */ + bfa_fcport_isr, /* BFI_MC_FCPORT */ bfa_isr_unhandled, /* BFI_MC_IOCFC */ bfa_isr_unhandled, /* BFI_MC_LL */ bfa_uf_isr, /* BFI_MC_UF */ diff --git a/drivers/scsi/bfa/bfa_modules_priv.h b/drivers/scsi/bfa/bfa_modules_priv.h index 96f70534593c..f554c2fad6a9 100644 --- a/drivers/scsi/bfa/bfa_modules_priv.h +++ b/drivers/scsi/bfa/bfa_modules_priv.h @@ -29,7 +29,7 @@ struct bfa_modules_s { - struct bfa_pport_s pport; /* physical port module */ + struct bfa_fcport_s fcport; /* fc port module */ struct bfa_fcxp_mod_s fcxp_mod; /* fcxp module */ struct bfa_lps_mod_s lps_mod; /* fcxp module */ struct bfa_uf_mod_s uf_mod; /* unsolicited frame module */ diff --git a/drivers/scsi/bfa/bfa_port_priv.h b/drivers/scsi/bfa/bfa_port_priv.h index f29701bd2369..6d315ffb1e99 100644 --- a/drivers/scsi/bfa/bfa_port_priv.h +++ b/drivers/scsi/bfa/bfa_port_priv.h @@ -25,17 +25,17 @@ /** * Link notification data structure */ -struct bfa_pport_ln_s { - struct bfa_pport_s *pport; +struct bfa_fcport_ln_s { + struct bfa_fcport_s *fcport; bfa_sm_t sm; struct bfa_cb_qe_s ln_qe; /* BFA callback queue elem for ln */ enum bfa_pport_linkstate ln_event; /* ln event for callback */ }; /** - * BFA physical port data structure + * BFA FC port data structure */ -struct bfa_pport_s { +struct bfa_fcport_s { struct bfa_s *bfa; /* parent BFA instance */ bfa_sm_t sm; /* port state machine */ wwn_t nwwn; /* node wwn of physical port */ @@ -62,7 +62,7 @@ struct bfa_pport_s { union bfi_pport_i2h_msg_u i2hmsg; } event_arg; void *bfad; /* BFA driver handle */ - struct bfa_pport_ln_s ln; /* Link Notification */ + struct bfa_fcport_ln_s ln; /* Link Notification */ struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */ u32 msgtag; /* fimrware msg tag for reply */ u8 *stats_kva; @@ -88,12 +88,17 @@ struct bfa_pport_s { /* driver callback function */ void *stats_cbarg; /* *!< user callback arg */ + /* FCport stats */ + u8 *fcport_stats_kva; + u64 fcport_stats_pa; + union bfa_fcport_stats_u *fcport_stats; + union bfa_fcport_stats_u *fcport_stats_ret; }; -#define BFA_PORT_MOD(__bfa) (&(__bfa)->modules.pport) +#define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport) /* * public functions */ -void bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); +void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); #endif /* __BFA_PORT_PRIV_H__ */ diff --git a/drivers/scsi/bfa/bfa_priv.h b/drivers/scsi/bfa/bfa_priv.h index 0747a6b26f7b..be80fc7e1b0e 100644 --- a/drivers/scsi/bfa/bfa_priv.h +++ b/drivers/scsi/bfa/bfa_priv.h @@ -101,7 +101,7 @@ extern 
bfa_boolean_t bfa_auto_recover; extern struct bfa_module_s hal_mod_flash; extern struct bfa_module_s hal_mod_fcdiag; extern struct bfa_module_s hal_mod_sgpg; -extern struct bfa_module_s hal_mod_pport; +extern struct bfa_module_s hal_mod_fcport; extern struct bfa_module_s hal_mod_fcxp; extern struct bfa_module_s hal_mod_lps; extern struct bfa_module_s hal_mod_uf; diff --git a/drivers/scsi/bfa/bfa_trcmod_priv.h b/drivers/scsi/bfa/bfa_trcmod_priv.h index 3d947d47b837..a7a82610db85 100644 --- a/drivers/scsi/bfa/bfa_trcmod_priv.h +++ b/drivers/scsi/bfa/bfa_trcmod_priv.h @@ -37,7 +37,7 @@ enum { BFA_TRC_HAL_IOIM = 6, BFA_TRC_HAL_TSKIM = 7, BFA_TRC_HAL_ITNIM = 8, - BFA_TRC_HAL_PPORT = 9, + BFA_TRC_HAL_FCPORT = 9, BFA_TRC_HAL_SGPG = 10, BFA_TRC_HAL_FLASH = 11, BFA_TRC_HAL_DEBUG = 12, diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 79956c152af9..a8a529d5fea4 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -664,7 +664,7 @@ bfad_fcs_port_cfg(struct bfad_s *bfad) sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no); memcpy(port_cfg.sym_name.symname, symname, strlen(symname)); - bfa_pport_get_attr(&bfad->bfa, &attr); + bfa_fcport_get_attr(&bfad->bfa, &attr); port_cfg.nwwn = attr.nwwn; port_cfg.pwwn = attr.pwwn; diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index adf801dbfa15..a691133c31a2 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -141,7 +141,7 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost) struct bfad_s *bfad = im_port->bfad; struct bfa_pport_attr_s attr; - bfa_pport_get_attr(&bfad->bfa, &attr); + bfa_fcport_get_attr(&bfad->bfa, &attr); switch (attr.port_type) { case BFA_PPORT_TYPE_NPORT: @@ -173,7 +173,7 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost) struct bfad_s *bfad = im_port->bfad; struct bfa_pport_attr_s attr; - bfa_pport_get_attr(&bfad->bfa, &attr); + bfa_fcport_get_attr(&bfad->bfa, &attr); switch (attr.port_state) { case BFA_PPORT_ST_LINKDOWN: @@ -232,7 +232,7 @@ bfad_im_get_host_speed(struct Scsi_Host *shost) unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); - bfa_pport_get_attr(&bfad->bfa, &attr); + bfa_fcport_get_attr(&bfad->bfa, &attr); switch (attr.speed) { case BFA_PPORT_SPEED_8GBPS: fc_host_speed(shost) = FC_PORTSPEED_8GBIT; diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index f788c2a0ab07..23390b40b9c3 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -966,7 +966,7 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port) FC_PORTSPEED_1GBIT; memset(&attr.pattr, 0, sizeof(attr.pattr)); - bfa_pport_get_attr(&bfad->bfa, &attr.pattr); + bfa_fcport_get_attr(&bfad->bfa, &attr.pattr); fc_host_maxframe_size(host) = attr.pattr.pport_cfg.maxfrsize; } diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c index b02ed7653eb6..e1a4b312e9d4 100644 --- a/drivers/scsi/bfa/fabric.c +++ b/drivers/scsi/bfa/fabric.c @@ -37,7 +37,7 @@ BFA_TRC_FILE(FCS, FABRIC); #define BFA_FCS_FABRIC_CLEANUP_DELAY (10000) /* Milliseconds */ #define bfa_fcs_fabric_set_opertype(__fabric) do { \ - if (bfa_pport_get_topology((__fabric)->fcs->bfa) \ + if (bfa_fcport_get_topology((__fabric)->fcs->bfa) \ == BFA_PPORT_TOPOLOGY_P2P) \ (__fabric)->oper_type = BFA_PPORT_TYPE_NPORT; \ else \ @@ -160,7 +160,7 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, switch (event) { case BFA_FCS_FABRIC_SM_START: - if (bfa_pport_is_linkup(fabric->fcs->bfa)) { + if (bfa_fcport_is_linkup(fabric->fcs->bfa)) { bfa_sm_set_state(fabric, 
bfa_fcs_fabric_sm_flogi); bfa_fcs_fabric_login(fabric); } else @@ -224,7 +224,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, switch (event) { case BFA_FCS_FABRIC_SM_CONT_OP: - bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); + bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); fabric->fab_type = BFA_FCS_FABRIC_SWITCHED; if (fabric->auth_reqd && fabric->is_auth) { @@ -251,7 +251,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, case BFA_FCS_FABRIC_SM_NO_FABRIC: fabric->fab_type = BFA_FCS_FABRIC_N2N; - bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); + bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); bfa_fcs_fabric_notify_online(fabric); bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric); break; @@ -418,7 +418,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, case BFA_FCS_FABRIC_SM_NO_FABRIC: bfa_trc(fabric->fcs, fabric->bb_credit); - bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); + bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit); break; default: @@ -718,10 +718,10 @@ bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric) struct bfa_port_cfg_s *pcfg = &fabric->bport.port_cfg; u8 alpa = 0; - if (bfa_pport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP) - alpa = bfa_pport_get_myalpa(bfa); + if (bfa_fcport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP) + alpa = bfa_fcport_get_myalpa(bfa); - bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_pport_get_maxfrsize(bfa), + bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa), pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd); fabric->stats.flogi_sent++; @@ -1176,8 +1176,8 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric) reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), bfa_os_hton3b(FC_FABRIC_PORT), n2n_port->reply_oxid, pcfg->pwwn, - pcfg->nwwn, bfa_pport_get_maxfrsize(bfa), - bfa_pport_get_rx_bbcredit(bfa)); + pcfg->nwwn, bfa_fcport_get_maxfrsize(bfa), + bfa_fcport_get_rx_bbcredit(bfa)); bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps), BFA_FALSE, FC_CLASS_3, reqlen, &fchs, diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c index e8120868701b..2c9e7132a368 100644 --- a/drivers/scsi/bfa/fdmi.c +++ b/drivers/scsi/bfa/fdmi.c @@ -1175,7 +1175,7 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_port_fdmi_s *fdmi, /* * get pport attributes from hal */ - bfa_pport_get_attr(port->fcs->bfa, &pport_attr); + bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); /* * get FC4 type Bitmask diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h index 71ffb75a71ca..f2c30858900b 100644 --- a/drivers/scsi/bfa/include/bfa_svc.h +++ b/drivers/scsi/bfa/include/bfa_svc.h @@ -26,6 +26,7 @@ struct bfa_fcxp_s; #include #include #include +#include #include #include @@ -151,60 +152,67 @@ struct bfa_lps_s { bfa_eproto_status_t ext_status; }; +#define BFA_FCPORT(_bfa) (&((_bfa)->modules.port)) + /* * bfa pport API functions */ -bfa_status_t bfa_pport_enable(struct bfa_s *bfa); -bfa_status_t bfa_pport_disable(struct bfa_s *bfa); -bfa_status_t bfa_pport_cfg_speed(struct bfa_s *bfa, +bfa_status_t bfa_fcport_enable(struct bfa_s *bfa); +bfa_status_t bfa_fcport_disable(struct bfa_s *bfa); +bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed); -enum bfa_pport_speed bfa_pport_get_speed(struct bfa_s *bfa); -bfa_status_t bfa_pport_cfg_topology(struct bfa_s *bfa, +enum bfa_pport_speed bfa_fcport_get_speed(struct bfa_s *bfa); +bfa_status_t 
bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topo); -enum bfa_pport_topology bfa_pport_get_topology(struct bfa_s *bfa); -bfa_status_t bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); -bfa_boolean_t bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); -u8 bfa_pport_get_myalpa(struct bfa_s *bfa); -bfa_status_t bfa_pport_clr_hardalpa(struct bfa_s *bfa); -bfa_status_t bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize); -u16 bfa_pport_get_maxfrsize(struct bfa_s *bfa); -u32 bfa_pport_mypid(struct bfa_s *bfa); -u8 bfa_pport_get_rx_bbcredit(struct bfa_s *bfa); -bfa_status_t bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap); -bfa_status_t bfa_pport_trunk_disable(struct bfa_s *bfa); -bfa_boolean_t bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap); -void bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr); -wwn_t bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node); +enum bfa_pport_topology bfa_fcport_get_topology(struct bfa_s *bfa); +bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa); +bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa); +u8 bfa_fcport_get_myalpa(struct bfa_s *bfa); +bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa); +bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize); +u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa); +u32 bfa_fcport_mypid(struct bfa_s *bfa); +u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa); +bfa_status_t bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap); +bfa_status_t bfa_fcport_trunk_disable(struct bfa_s *bfa); +bfa_boolean_t bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap); +void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr); +wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node); bfa_status_t bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, bfa_cb_pport_t cbfn, void *cbarg); bfa_status_t bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg); -void bfa_pport_event_register(struct bfa_s *bfa, +void bfa_fcport_event_register(struct bfa_s *bfa, void (*event_cbfn) (void *cbarg, bfa_pport_event_t event), void *event_cbarg); -bfa_boolean_t bfa_pport_is_disabled(struct bfa_s *bfa); -void bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off); -void bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off); -bfa_status_t bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, +bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); +void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off); +void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off); +bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed); -enum bfa_pport_speed bfa_pport_get_ratelim_speed(struct bfa_s *bfa); +enum bfa_pport_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); -void bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit); -void bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status); -void bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon, +void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit); +void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status); +void bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon); void bfa_cb_pport_event(void *cbarg, bfa_pport_event_t event); -void bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr); -void bfa_pport_qos_get_vc_attr(struct bfa_s *bfa, +void bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct 
bfa_qos_attr_s *qos_attr); +void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa, struct bfa_qos_vc_attr_s *qos_vc_attr); -bfa_status_t bfa_pport_get_qos_stats(struct bfa_s *bfa, +bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, bfa_cb_pport_t cbfn, void *cbarg); -bfa_status_t bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, +bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg); -bfa_boolean_t bfa_pport_is_ratelim(struct bfa_s *bfa); -bfa_boolean_t bfa_pport_is_linkup(struct bfa_s *bfa); +bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa); +bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa); +bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa, + union bfa_fcport_stats_u *stats, + bfa_cb_pport_t cbfn, void *cbarg); +bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, + void *cbarg); /* * bfa rport API functions diff --git a/drivers/scsi/bfa/include/bfi/bfi_pport.h b/drivers/scsi/bfa/include/bfi/bfi_pport.h index c96d246851af..5c3d289d986d 100644 --- a/drivers/scsi/bfa/include/bfi/bfi_pport.h +++ b/drivers/scsi/bfa/include/bfi/bfi_pport.h @@ -32,6 +32,8 @@ enum bfi_pport_h2i { BFI_PPORT_H2I_ENABLE_TX_VF_TAG_REQ = (7), BFI_PPORT_H2I_GET_QOS_STATS_REQ = (8), BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ = (9), + BFI_FCPORT_H2I_GET_STATS_REQ = (10), + BFI_FCPORT_H2I_CLEAR_STATS_REQ = (11), }; enum bfi_pport_i2h { @@ -45,6 +47,8 @@ enum bfi_pport_i2h { BFI_PPORT_I2H_EVENT = BFA_I2HM(8), BFI_PPORT_I2H_GET_QOS_STATS_RSP = BFA_I2HM(9), BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP = BFA_I2HM(10), + BFI_FCPORT_I2H_GET_STATS_RSP = BFA_I2HM(11), + BFI_FCPORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(12), }; /** @@ -75,6 +79,7 @@ struct bfi_pport_enable_req_s { wwn_t pwwn; /* port wwn of physical port */ struct bfa_pport_cfg_s port_cfg; /* port configuration */ union bfi_addr_u stats_dma_addr; /* DMA address for stats */ + union bfi_addr_u fcport_stats_dma_addr;/*!< DMA address for stats */ u32 msgtag; /* msgtag for reply */ u32 rsvd2; }; diff --git a/drivers/scsi/bfa/include/cs/bfa_sm.h b/drivers/scsi/bfa/include/cs/bfa_sm.h index b0a92baf6657..11fba9082f05 100644 --- a/drivers/scsi/bfa/include/cs/bfa_sm.h +++ b/drivers/scsi/bfa/include/cs/bfa_sm.h @@ -23,6 +23,14 @@ #define __BFA_SM_H__ typedef void (*bfa_sm_t)(void *sm, int event); +/** + * oc - object class eg. bfa_ioc + * st - state, eg. reset + * otype - object type, eg. struct bfa_ioc_s + * etype - object type, eg. enum ioc_event + */ +#define bfa_sm_state_decl(oc, st, otype, etype) \ + static void oc ## _sm_ ## st(otype * fsm, etype event) #define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state)) #define bfa_sm_send_event(_sm, _event) ((_sm)->sm((_sm), (_event))) diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h b/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h index 79f9b3e146f7..b4fa0923aa89 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h @@ -19,6 +19,7 @@ #define __BFA_DEFS_ETHPORT_H__ #include +#include #include #include #include diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h b/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h new file mode 100644 index 000000000000..a07ef4a3cd78 --- /dev/null +++ b/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. 
+ * All rights reserved + * www.brocade.com + * + * bfa_defs_fcport.h + * + * Linux driver for Brocade Fibre Channel Host Bus Adapter. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License (GPL) Version 2 as + * published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#ifndef __BFA_DEFS_FCPORT_H__ +#define __BFA_DEFS_FCPORT_H__ + +#include +#include + +#pragma pack(1) + +/** + * FCoE statistics + */ +struct bfa_fcoe_stats_s { + u64 secs_reset; /* Seconds since stats reset */ + u64 cee_linkups; /* CEE link up */ + u64 cee_linkdns; /* CEE link down */ + u64 fip_linkups; /* FIP link up */ + u64 fip_linkdns; /* FIP link down */ + u64 fip_fails; /* FIP failures */ + u64 mac_invalids; /* Invalid mac assignments */ + u64 vlan_req; /* Vlan requests */ + u64 vlan_notify; /* Vlan notifications */ + u64 vlan_err; /* Vlan notification errors */ + u64 vlan_timeouts; /* Vlan request timeouts */ + u64 vlan_invalids; /* Vlan invalids */ + u64 disc_req; /* Discovery requests */ + u64 disc_rsp; /* Discovery responses */ + u64 disc_err; /* Discovery error frames */ + u64 disc_unsol; /* Discovery unsolicited */ + u64 disc_timeouts; /* Discovery timeouts */ + u64 disc_fcf_unavail; /* Discovery FCF not avail */ + u64 linksvc_unsupp; /* FIP link service req unsupp. */ + u64 linksvc_err; /* FIP link service req errors */ + u64 logo_req; /* FIP logo */ + u64 clrvlink_req; /* Clear virtual link requests */ + u64 op_unsupp; /* FIP operation unsupp. */ + u64 untagged; /* FIP untagged frames */ + u64 txf_ucast; /* Tx FCoE unicast frames */ + u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */ + u64 txf_ucast_octets; /* Tx FCoE unicast octets */ + u64 txf_mcast; /* Tx FCoE mutlicast frames */ + u64 txf_mcast_vlan; /* Tx FCoE mutlicast vlan frames */ + u64 txf_mcast_octets; /* Tx FCoE multicast octets */ + u64 txf_bcast; /* Tx FCoE broadcast frames */ + u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */ + u64 txf_bcast_octets; /* Tx FCoE broadcast octets */ + u64 txf_timeout; /* Tx timeouts */ + u64 txf_parity_errors; /* Transmit parity err */ + u64 txf_fid_parity_errors; /* Transmit FID parity err */ + u64 tx_pause; /* Tx pause frames */ + u64 tx_zero_pause; /* Tx zero pause frames */ + u64 tx_first_pause; /* Tx first pause frames */ + u64 rx_pause; /* Rx pause frames */ + u64 rx_zero_pause; /* Rx zero pause frames */ + u64 rx_first_pause; /* Rx first pause frames */ + u64 rxf_ucast_octets; /* Rx unicast octets */ + u64 rxf_ucast; /* Rx unicast frames */ + u64 rxf_ucast_vlan; /* Rx unicast vlan frames */ + u64 rxf_mcast_octets; /* Rx multicast octets */ + u64 rxf_mcast; /* Rx multicast frames */ + u64 rxf_mcast_vlan; /* Rx multicast vlan frames */ + u64 rxf_bcast_octets; /* Rx broadcast octests */ + u64 rxf_bcast; /* Rx broadcast frames */ + u64 rxf_bcast_vlan; /* Rx broadcast vlan frames */ +}; + +/** + * QoS or FCoE stats (fcport stats excluding physical FC port stats) + */ +union bfa_fcport_stats_u { + struct bfa_qos_stats_s fcqos; + struct bfa_fcoe_stats_s fcoe; +}; + +#pragma pack() + +#endif /* __BFA_DEFS_FCPORT_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h index 87f0401c6439..c290fb13d2d1 100644 --- 
a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h @@ -236,6 +236,7 @@ struct bfa_fw_fip_stats_s { u32 disc_err; /* Discovery advt. parse errors */ u32 disc_unsol; /* Discovery unsolicited */ u32 disc_timeouts; /* Discovery timeouts */ + u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */ u32 linksvc_unsupp; /* Unsupported link service req */ u32 linksvc_err; /* Parse error in link service req */ u32 logo_req; /* Number of FIP logos received */ diff --git a/drivers/scsi/bfa/loop.c b/drivers/scsi/bfa/loop.c index f7c7f4f3c640..f6342efb6a90 100644 --- a/drivers/scsi/bfa/loop.c +++ b/drivers/scsi/bfa/loop.c @@ -162,7 +162,7 @@ bfa_fcs_port_loop_send_plogi(struct bfa_fcs_port_s *port, u8 alpa) len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa, bfa_fcs_port_get_fcid(port), 0, port->port_cfg.pwwn, port->port_cfg.nwwn, - bfa_pport_get_maxfrsize(port->fcs->bfa)); + bfa_fcport_get_maxfrsize(port->fcs->bfa)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, diff --git a/drivers/scsi/bfa/lport_api.c b/drivers/scsi/bfa/lport_api.c index 4a4ccce99364..d3907d184e2b 100644 --- a/drivers/scsi/bfa/lport_api.c +++ b/drivers/scsi/bfa/lport_api.c @@ -156,7 +156,7 @@ bfa_fcs_port_get_rport_max_speed(struct bfa_fcs_port_s *port) /* * Get Physical port's current speed */ - bfa_pport_get_attr(port->fcs->bfa, &pport_attr); + bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); pport_speed = pport_attr.speed; bfa_trc(fcs, pport_speed); diff --git a/drivers/scsi/bfa/ms.c b/drivers/scsi/bfa/ms.c index f0275a409fd2..e6db5fd301f2 100644 --- a/drivers/scsi/bfa/ms.c +++ b/drivers/scsi/bfa/ms.c @@ -637,7 +637,7 @@ bfa_fcs_port_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) bfa_os_hton3b(FC_MGMT_SERVER), bfa_fcs_port_get_fcid(port), 0, port->port_cfg.pwwn, port->port_cfg.nwwn, - bfa_pport_get_maxfrsize(port->fcs->bfa)); + bfa_fcport_get_maxfrsize(port->fcs->bfa)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_plogi_response, diff --git a/drivers/scsi/bfa/ns.c b/drivers/scsi/bfa/ns.c index 6de06a557e2d..d20dd7e15742 100644 --- a/drivers/scsi/bfa/ns.c +++ b/drivers/scsi/bfa/ns.c @@ -660,7 +660,7 @@ bfa_fcs_port_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) bfa_os_hton3b(FC_NAME_SERVER), bfa_fcs_port_get_fcid(port), 0, port->port_cfg.pwwn, port->port_cfg.nwwn, - bfa_pport_get_maxfrsize(port->fcs->bfa)); + bfa_fcport_get_maxfrsize(port->fcs->bfa)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_plogi_response, diff --git a/drivers/scsi/bfa/rport.c b/drivers/scsi/bfa/rport.c index 80592e352226..8c5969c86934 100644 --- a/drivers/scsi/bfa/rport.c +++ b/drivers/scsi/bfa/rport.c @@ -1373,7 +1373,7 @@ bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, bfa_fcs_port_get_fcid(port), 0, port->port_cfg.pwwn, port->port_cfg.nwwn, - bfa_pport_get_maxfrsize(port->fcs->bfa)); + bfa_fcport_get_maxfrsize(port->fcs->bfa)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response, @@ -1485,7 +1485,7 @@ bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid, bfa_fcs_port_get_fcid(port), rport->reply_oxid, port->port_cfg.pwwn, 
port->port_cfg.nwwn, - bfa_pport_get_maxfrsize(port->fcs->bfa)); + bfa_fcport_get_maxfrsize(port->fcs->bfa)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); @@ -1820,7 +1820,7 @@ bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport, /* * get curent speed from pport attributes from BFA */ - bfa_pport_get_attr(port->fcs->bfa, &pport_attr); + bfa_fcport_get_attr(port->fcs->bfa, &pport_attr); speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed); @@ -2171,7 +2171,7 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi) bfa_trc(port->fcs, port->fabric->bb_credit); port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred); - bfa_pport_set_tx_bbcredit(port->fcs->bfa, + bfa_fcport_set_tx_bbcredit(port->fcs->bfa, port->fabric->bb_credit); } diff --git a/drivers/scsi/bfa/rport_api.c b/drivers/scsi/bfa/rport_api.c index 3dae1774181e..a441f41d2a64 100644 --- a/drivers/scsi/bfa/rport_api.c +++ b/drivers/scsi/bfa/rport_api.c @@ -102,7 +102,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, rport_attr->qos_attr = qos_attr; rport_attr->trl_enforced = BFA_FALSE; - if (bfa_pport_is_ratelim(port->fcs->bfa)) { + if (bfa_fcport_is_ratelim(port->fcs->bfa)) { if ((rport->rpf.rpsc_speed == BFA_PPORT_SPEED_UNKNOWN) || (rport->rpf.rpsc_speed < bfa_fcs_port_get_rport_max_speed(port))) diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c index 14fb7ac2bfbc..c5e534eb1620 100644 --- a/drivers/scsi/bfa/vport.c +++ b/drivers/scsi/bfa/vport.c @@ -478,7 +478,7 @@ static void bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport) { bfa_lps_fdisc(vport->lps, vport, - bfa_pport_get_maxfrsize(__vport_bfa(vport)), + bfa_fcport_get_maxfrsize(__vport_bfa(vport)), __vport_pwwn(vport), __vport_nwwn(vport)); vport->vport_stats.fdisc_sent++; } -- cgit v1.2.3 From f58e9ebbf78bd36c6cf1ca651280d39efe73a7c0 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:37:45 -0800 Subject: [SCSI] bfa: New portlog entries for events (FIP/FLOGI/FDISC/LOGO). 
Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_fcport.c | 21 ++++++++++++++++---- drivers/scsi/bfa/bfa_lps.c | 27 ++++++++++++++++++++++++-- drivers/scsi/bfa/include/defs/bfa_defs_pport.h | 21 ++++++++++++++++++-- drivers/scsi/bfa/include/protocol/fc.h | 5 +++++ 4 files changed, 66 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c index f392a7fa8c7f..a48413c230a5 100644 --- a/drivers/scsi/bfa/bfa_fcport.c +++ b/drivers/scsi/bfa/bfa_fcport.c @@ -326,6 +326,7 @@ static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { + struct bfi_pport_event_s *pevent = fcport->event_arg.i2hmsg.event; bfa_trc(fcport->bfa, event); switch (event) { @@ -335,6 +336,22 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, bfa_assert(fcport->event_cbfn); bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup"); + + if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { + + bfa_trc(fcport->bfa, pevent->link_state.fcf.fipenabled); + bfa_trc(fcport->bfa, pevent->link_state.fcf.fipfailed); + + if (pevent->link_state.fcf.fipfailed) + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_FIP_FCF_DISC, 0, + "FIP FCF Discovery Failed"); + else + bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, + BFA_PL_EID_FIP_FCF_DISC, 0, + "FIP FCF Discovered"); + } + bfa_fcport_callback(fcport, BFA_PPORT_LINKUP); bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE); /** @@ -1500,8 +1517,6 @@ bfa_port_stats_query(void *cbarg) bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_GET_STATS_REQ, bfa_lpuid(fcport->bfa)); bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); - - return; } static void @@ -1526,7 +1541,6 @@ bfa_port_stats_clear(void *cbarg) bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_STATS_REQ, bfa_lpuid(fcport->bfa)); bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); - return; } static void @@ -1599,7 +1613,6 @@ bfa_port_qos_stats_clear(void *cbarg) bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ, bfa_lpuid(fcport->bfa)); bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); - return; } static void diff --git a/drivers/scsi/bfa/bfa_lps.c b/drivers/scsi/bfa/bfa_lps.c index 730616f6e671..d2d48a619c30 100644 --- a/drivers/scsi/bfa/bfa_lps.c +++ b/drivers/scsi/bfa/bfa_lps.c @@ -99,6 +99,12 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event) bfa_sm_set_state(lps, bfa_lps_sm_login); bfa_lps_send_login(lps); } + if (lps->fdisc) + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, "FDISC Request"); + else + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, "FLOGI Request"); break; case BFA_LPS_SM_LOGOUT: @@ -136,10 +142,25 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) switch (event) { case BFA_LPS_SM_FWRSP: - if (lps->status == BFA_STATUS_OK) + if (lps->status == BFA_STATUS_OK) { bfa_sm_set_state(lps, bfa_lps_sm_online); - else + if (lps->fdisc) + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, "FDISC Accept"); + else + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, "FLOGI Accept"); + } else { bfa_sm_set_state(lps, bfa_lps_sm_init); + if (lps->fdisc) + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, + "FDISC Fail (RJT or timeout)"); + else + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGIN, 0, + "FLOGI Fail (RJT or timeout)"); + } bfa_lps_login_comp(lps); break; @@ -202,6 +223,8 @@ bfa_lps_sm_online(struct 
bfa_lps_s *lps, enum bfa_lps_event event) bfa_sm_set_state(lps, bfa_lps_sm_logout); bfa_lps_send_logout(lps); } + bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, + BFA_PL_EID_LOGO, 0, "Logout"); break; case BFA_LPS_SM_RX_CVL: diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h index 88662a15a21b..164cfbef9b12 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h @@ -333,8 +333,7 @@ struct bfa_pport_fcpmap_s { }; /** - * Port RNID info. - */ + * Port RNI */ struct bfa_pport_rnid_s { wwn_t wwn; u32 unittype; @@ -347,6 +346,23 @@ struct bfa_pport_rnid_s { u16 topologydiscoveryflags; }; +struct bfa_fcport_fcf_s { + wwn_t name; /* FCF name */ + wwn_t fabric_name; /* Fabric Name */ + u8 fipenabled; /* FIP enabled or not */ + u8 fipfailed; /* FIP failed or not */ + u8 resv[2]; + u8 pri; /* FCF priority */ + u8 version; /* FIP version used */ + u8 available; /* Available for login */ + u8 fka_disabled; /* FKA is disabled */ + u8 maxsz_verified; /* FCoE max size verified */ + u8 fc_map[3]; /* FC map */ + u16 vlan; /* FCoE vlan tag/priority */ + u32 fka_adv_per; /* FIP ka advert. period */ + struct mac_s mac; /* FCF mac */ +}; + /** * Link state information */ @@ -378,6 +394,7 @@ struct bfa_pport_link_s { struct fc_alpabm_s alpabm; /* alpa bitmap */ } loop_info; } tl; + struct bfa_fcport_fcf_s fcf; /*!< FCF information (for FCoE) */ }; #endif /* __BFA_DEFS_PPORT_H__ */ diff --git a/drivers/scsi/bfa/include/protocol/fc.h b/drivers/scsi/bfa/include/protocol/fc.h index 14969eecf6a9..8d1038035a76 100644 --- a/drivers/scsi/bfa/include/protocol/fc.h +++ b/drivers/scsi/bfa/include/protocol/fc.h @@ -50,6 +50,11 @@ struct fchs_s { u32 ro; /* relative offset */ }; + +#define FC_SOF_LEN 4 +#define FC_EOF_LEN 4 +#define FC_CRC_LEN 4 + /* * Fibre Channel BB_E Header Structure */ -- cgit v1.2.3 From 0a4b1fc0b24fc7adbaf8413f2992ce1395991a78 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:37:57 -0800 Subject: [SCSI] bfa: Replace bfa_get_attr() with specific APIs bfa_ioc_attr_s is a big structure and some times could cause stack overflow if defined locally, so add specific APIs that are needed to replace the use of ioc_attr local var. 
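
To illustrate the pattern this change applies, here is a minimal before/after sketch taken from the bfad_im_model_show() hunk further down in this patch; it is a fragment for illustration only, and the driver hunks below remain authoritative:

        /* Before: a large attribute structure is placed on the kernel
         * stack only to read a single field, which can overflow the stack.
         */
        struct bfa_ioc_attr_s ioc_attr;
        bfa_get_attr(&bfad->bfa, &ioc_attr);
        return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.model);

        /* After: a purpose-built accessor fills a small local buffer. */
        char model[BFA_ADAPTER_MODEL_NAME_LEN];
        bfa_get_adapter_model(&bfad->bfa, model);
        return snprintf(buf, PAGE_SIZE, "%s\n", model);
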
Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_fcport.c | 33 ++------ drivers/scsi/bfa/bfa_fcs_lport.c | 26 +----- drivers/scsi/bfa/bfa_ioc.c | 155 +++++++++++++++++++++------------- drivers/scsi/bfa/bfa_ioc.h | 10 +++ drivers/scsi/bfa/bfa_lps.c | 6 +- drivers/scsi/bfa/bfad.c | 4 +- drivers/scsi/bfa/bfad_attr.c | 62 ++++++-------- drivers/scsi/bfa/bfad_drv.h | 13 +++ drivers/scsi/bfa/bfad_im.c | 29 +++---- drivers/scsi/bfa/fabric.c | 10 +-- drivers/scsi/bfa/fcpim.c | 15 +--- drivers/scsi/bfa/include/bfa.h | 20 +++++ drivers/scsi/bfa/include/cs/bfa_log.h | 2 +- drivers/scsi/bfa/rport.c | 7 +- drivers/scsi/bfa/vport.c | 18 +--- 15 files changed, 195 insertions(+), 215 deletions(-) diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c index a48413c230a5..0da612010787 100644 --- a/drivers/scsi/bfa/bfa_fcport.c +++ b/drivers/scsi/bfa/bfa_fcport.c @@ -145,35 +145,12 @@ bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event) struct bfa_log_mod_s *logmod = fcport->bfa->logm; wwn_t pwwn = fcport->pwwn; char pwwn_ptr[BFA_STRING_32]; - struct bfa_ioc_attr_s ioc_attr; memset(&aen_data, 0, sizeof(aen_data)); wwn2str(pwwn_ptr, pwwn); - switch (event) { - case BFA_PORT_AEN_ONLINE: - bfa_log(logmod, BFA_AEN_PORT_ONLINE, pwwn_ptr); - break; - case BFA_PORT_AEN_OFFLINE: - bfa_log(logmod, BFA_AEN_PORT_OFFLINE, pwwn_ptr); - break; - case BFA_PORT_AEN_ENABLE: - bfa_log(logmod, BFA_AEN_PORT_ENABLE, pwwn_ptr); - break; - case BFA_PORT_AEN_DISABLE: - bfa_log(logmod, BFA_AEN_PORT_DISABLE, pwwn_ptr); - break; - case BFA_PORT_AEN_DISCONNECT: - bfa_log(logmod, BFA_AEN_PORT_DISCONNECT, pwwn_ptr); - break; - case BFA_PORT_AEN_QOS_NEG: - bfa_log(logmod, BFA_AEN_PORT_QOS_NEG, pwwn_ptr); - break; - default: - break; - } + bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event), pwwn_ptr); - bfa_ioc_get_attr(&fcport->bfa->ioc, &ioc_attr); - aen_data.port.ioc_type = ioc_attr.ioc_type; + aen_data.port.ioc_type = bfa_get_type(fcport->bfa); aen_data.port.pwwn = pwwn; } @@ -2043,11 +2020,15 @@ void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa); bfa_trc(bfa, on_off); bfa_trc(bfa, fcport->cfg.qos_enabled); - fcport->cfg.qos_enabled = on_off; + bfa_trc(bfa, ioc_type); + + if (ioc_type == BFA_IOC_TYPE_FC) + fcport->cfg.qos_enabled = on_off; } void diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 4a51aac7ab04..7c1251c682d8 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -263,30 +263,8 @@ bfa_fcs_port_aen_post(struct bfa_fcs_port_s *port, bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX); - switch (event) { - case BFA_LPORT_AEN_ONLINE: - bfa_log(logmod, BFA_AEN_LPORT_ONLINE, lpwwn_ptr, - role_str[role / 2]); - break; - case BFA_LPORT_AEN_OFFLINE: - bfa_log(logmod, BFA_AEN_LPORT_OFFLINE, lpwwn_ptr, - role_str[role / 2]); - break; - case BFA_LPORT_AEN_NEW: - bfa_log(logmod, BFA_AEN_LPORT_NEW, lpwwn_ptr, - role_str[role / 2]); - break; - case BFA_LPORT_AEN_DELETE: - bfa_log(logmod, BFA_AEN_LPORT_DELETE, lpwwn_ptr, - role_str[role / 2]); - break; - case BFA_LPORT_AEN_DISCONNECT: - bfa_log(logmod, BFA_AEN_LPORT_DISCONNECT, lpwwn_ptr, - role_str[role / 2]); - break; - default: - break; - } + bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr, + role_str[role/2]); aen_data.lport.vf_id = port->fabric->vf_id; aen_data.lport.roles = role; diff --git 
a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 4d9a47ccffe9..e038bc9769f6 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -1679,46 +1679,28 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, struct bfa_adapter_attr_s *ad_attr) { struct bfi_ioc_attr_s *ioc_attr; - char model[BFA_ADAPTER_MODEL_NAME_LEN]; ioc_attr = ioc->attr; - bfa_os_memcpy((void *)&ad_attr->serial_num, - (void *)ioc_attr->brcd_serialnum, - BFA_ADAPTER_SERIAL_NUM_LEN); - - bfa_os_memcpy(&ad_attr->fw_ver, ioc_attr->fw_version, BFA_VERSION_LEN); - bfa_os_memcpy(&ad_attr->optrom_ver, ioc_attr->optrom_version, - BFA_VERSION_LEN); - bfa_os_memcpy(&ad_attr->manufacturer, BFA_MFG_NAME, - BFA_ADAPTER_MFG_NAME_LEN); + + bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); + bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); + bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); + bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd, sizeof(struct bfa_mfg_vpd_s)); - ad_attr->nports = BFI_ADAPTER_GETP(NPORTS, ioc_attr->adapter_prop); - ad_attr->max_speed = BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop); + ad_attr->nports = bfa_ioc_get_nports(ioc); + ad_attr->max_speed = bfa_ioc_speed_sup(ioc); - /** - * model name - */ - if (BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop) == 10) { - strcpy(model, "BR-10?0"); - model[5] = '0' + ad_attr->nports; - } else { - strcpy(model, "Brocade-??5"); - model[8] = - '0' + BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop); - model[9] = '0' + ad_attr->nports; - } + bfa_ioc_get_adapter_model(ioc, ad_attr->model); + /* For now, model descr uses same model string */ + bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) ad_attr->prototype = 1; else ad_attr->prototype = 0; - bfa_os_memcpy(&ad_attr->model, model, BFA_ADAPTER_MODEL_NAME_LEN); - bfa_os_memcpy(&ad_attr->model_descr, &ad_attr->model, - BFA_ADAPTER_MODEL_NAME_LEN); - ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); ad_attr->mac = bfa_ioc_get_mac(ioc); @@ -1726,12 +1708,8 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, ad_attr->pcie_lanes = ioc_attr->pcie_lanes; ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; ad_attr->asic_rev = ioc_attr->asic_rev; - ad_attr->hw_ver[0] = 'R'; - ad_attr->hw_ver[1] = 'e'; - ad_attr->hw_ver[2] = 'v'; - ad_attr->hw_ver[3] = '-'; - ad_attr->hw_ver[4] = ioc_attr->asic_rev; - ad_attr->hw_ver[5] = '\0'; + + bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); ad_attr->cna_capable = ioc->cna; } @@ -1751,12 +1729,92 @@ bfa_ioc_get_type(struct bfa_ioc_s *ioc) } } +void +bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num) +{ + bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN); + bfa_os_memcpy((void *)serial_num, + (void *)ioc->attr->brcd_serialnum, + BFA_ADAPTER_SERIAL_NUM_LEN); +} + +void +bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver) +{ + bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN); + bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); +} + +void +bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev) +{ + bfa_assert(chip_rev); + + bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN); + + chip_rev[0] = 'R'; + chip_rev[1] = 'e'; + chip_rev[2] = 'v'; + chip_rev[3] = '-'; + chip_rev[4] = ioc->attr->asic_rev; + chip_rev[5] = '\0'; +} + +void +bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver) +{ + bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN); + 
bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version, + BFA_VERSION_LEN); +} + +void +bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer) +{ + bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); + bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); +} + +void +bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) +{ + struct bfi_ioc_attr_s *ioc_attr; + u8 nports; + u8 max_speed; + + bfa_assert(model); + bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); + + ioc_attr = ioc->attr; + + nports = bfa_ioc_get_nports(ioc); + max_speed = bfa_ioc_speed_sup(ioc); + + /** + * model name + */ + if (max_speed == 10) { + strcpy(model, "BR-10?0"); + model[5] = '0' + nports; + } else { + strcpy(model, "Brocade-??5"); + model[8] = '0' + max_speed; + model[9] = '0' + nports; + } +} + +enum bfa_ioc_state +bfa_ioc_get_state(struct bfa_ioc_s *ioc) +{ + return bfa_sm_to_state(ioc_sm_table, ioc->fsm); +} + void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) { bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s)); - ioc_attr->state = bfa_sm_to_state(ioc_sm_table, ioc->fsm); + ioc_attr->state = bfa_ioc_get_state(ioc); ioc_attr->port_id = ioc->port_id; ioc_attr->ioc_type = bfa_ioc_get_type(ioc); @@ -1765,12 +1823,7 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) ioc_attr->pci_attr.device_id = ioc->pcidev.device_id; ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func; - ioc_attr->pci_attr.chip_rev[0] = 'R'; - ioc_attr->pci_attr.chip_rev[1] = 'e'; - ioc_attr->pci_attr.chip_rev[2] = 'v'; - ioc_attr->pci_attr.chip_rev[3] = '-'; - ioc_attr->pci_attr.chip_rev[4] = ioc_attr->adapter_attr.asic_rev; - ioc_attr->pci_attr.chip_rev[5] = '\0'; + bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); } /** @@ -1877,25 +1930,7 @@ bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event) s32 inst_num = 0; enum bfa_ioc_type_e ioc_type; - switch (event) { - case BFA_IOC_AEN_HBGOOD: - bfa_log(logmod, BFA_AEN_IOC_HBGOOD, inst_num); - break; - case BFA_IOC_AEN_HBFAIL: - bfa_log(logmod, BFA_AEN_IOC_HBFAIL, inst_num); - break; - case BFA_IOC_AEN_ENABLE: - bfa_log(logmod, BFA_AEN_IOC_ENABLE, inst_num); - break; - case BFA_IOC_AEN_DISABLE: - bfa_log(logmod, BFA_AEN_IOC_DISABLE, inst_num); - break; - case BFA_IOC_AEN_FWMISMATCH: - bfa_log(logmod, BFA_AEN_IOC_FWMISMATCH, inst_num); - break; - default: - break; - } + bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num); memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn)); memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac)); diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 4b73efad1ee5..d0804406ea1a 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -263,6 +263,16 @@ bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc); void bfa_ioc_cfg_complete(struct bfa_ioc_s *ioc); +enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc); +void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num); +void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver); +void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver); +void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model); +void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, + char *manufacturer); +void bfa_ioc_get_pci_chip_rev(struct 
bfa_ioc_s *ioc, char *chip_rev); +enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc); + void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr); void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, struct bfa_adapter_attr_s *ad_attr); diff --git a/drivers/scsi/bfa/bfa_lps.c b/drivers/scsi/bfa/bfa_lps.c index d2d48a619c30..ad06f6189092 100644 --- a/drivers/scsi/bfa/bfa_lps.c +++ b/drivers/scsi/bfa/bfa_lps.c @@ -631,11 +631,7 @@ bfa_lps_cvl_event(struct bfa_lps_s *lps) u32 bfa_lps_get_max_vport(struct bfa_s *bfa) { - struct bfa_ioc_attr_s ioc_attr; - - bfa_get_attr(bfa, &ioc_attr); - - if (ioc_attr.pci_attr.device_id == BFA_PCI_DEVICE_ID_CT) + if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) return BFA_LPS_MAX_VPORTS_SUPP_CT; else return BFA_LPS_MAX_VPORTS_SUPP_CB; diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index a8a529d5fea4..6bff08ea4029 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -978,7 +978,6 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { struct bfad_s *bfad; int error = -ENODEV, retval; - char buf[16]; /* * For single port cards - only claim function 0 @@ -1009,8 +1008,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) bfa_trc(bfad, bfad_inst); bfad->logmod = &bfad->log_data; - sprintf(buf, "%d", bfad_inst); - bfa_log_init(bfad->logmod, buf, bfa_os_printf); + bfa_log_init(bfad->logmod, (char *)pci_name(pdev), bfa_os_printf); bfad_drv_log_level_set(bfad); diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index a691133c31a2..dd5cb20d4b37 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -424,12 +424,10 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr, struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - struct bfa_ioc_attr_s ioc_attr; + char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; - memset(&ioc_attr, 0, sizeof(ioc_attr)); - bfa_get_attr(&bfad->bfa, &ioc_attr); - return snprintf(buf, PAGE_SIZE, "%s\n", - ioc_attr.adapter_attr.serial_num); + bfa_get_adapter_serial_num(&bfad->bfa, serial_num); + return snprintf(buf, PAGE_SIZE, "%s\n", serial_num); } static ssize_t @@ -440,11 +438,10 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr, struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - struct bfa_ioc_attr_s ioc_attr; + char model[BFA_ADAPTER_MODEL_NAME_LEN]; - memset(&ioc_attr, 0, sizeof(ioc_attr)); - bfa_get_attr(&bfad->bfa, &ioc_attr); - return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.model); + bfa_get_adapter_model(&bfad->bfa, model); + return snprintf(buf, PAGE_SIZE, "%s\n", model); } static ssize_t @@ -455,12 +452,10 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr, struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - struct bfa_ioc_attr_s ioc_attr; + char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; - memset(&ioc_attr, 0, sizeof(ioc_attr)); - bfa_get_attr(&bfad->bfa, &ioc_attr); - return snprintf(buf, PAGE_SIZE, "%s\n", - ioc_attr.adapter_attr.model_descr); + bfa_get_adapter_model(&bfad->bfa, model_descr); + return snprintf(buf, PAGE_SIZE, "%s\n", model_descr); } static ssize_t @@ -485,14 +480,13 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr, struct bfad_im_port_s *im_port = (struct 
bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - struct bfa_ioc_attr_s ioc_attr; - - memset(&ioc_attr, 0, sizeof(ioc_attr)); - bfa_get_attr(&bfad->bfa, &ioc_attr); + char model[BFA_ADAPTER_MODEL_NAME_LEN]; + char fw_ver[BFA_VERSION_LEN]; + bfa_get_adapter_model(&bfad->bfa, model); + bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); return snprintf(buf, PAGE_SIZE, "Brocade %s FV%s DV%s\n", - ioc_attr.adapter_attr.model, - ioc_attr.adapter_attr.fw_ver, BFAD_DRIVER_VERSION); + model, fw_ver, BFAD_DRIVER_VERSION); } static ssize_t @@ -503,11 +497,10 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr, struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - struct bfa_ioc_attr_s ioc_attr; + char hw_ver[BFA_VERSION_LEN]; - memset(&ioc_attr, 0, sizeof(ioc_attr)); - bfa_get_attr(&bfad->bfa, &ioc_attr); - return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.hw_ver); + bfa_get_pci_chip_rev(&bfad->bfa, hw_ver); + return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver); } static ssize_t @@ -525,12 +518,10 @@ bfad_im_optionrom_version_show(struct device *dev, struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - struct bfa_ioc_attr_s ioc_attr; + char optrom_ver[BFA_VERSION_LEN]; - memset(&ioc_attr, 0, sizeof(ioc_attr)); - bfa_get_attr(&bfad->bfa, &ioc_attr); - return snprintf(buf, PAGE_SIZE, "%s\n", - ioc_attr.adapter_attr.optrom_ver); + bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver); + return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver); } static ssize_t @@ -541,11 +532,10 @@ bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr, struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - struct bfa_ioc_attr_s ioc_attr; + char fw_ver[BFA_VERSION_LEN]; - memset(&ioc_attr, 0, sizeof(ioc_attr)); - bfa_get_attr(&bfad->bfa, &ioc_attr); - return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.fw_ver); + bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); + return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver); } static ssize_t @@ -556,11 +546,9 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr, struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - struct bfa_ioc_attr_s ioc_attr; - memset(&ioc_attr, 0, sizeof(ioc_attr)); - bfa_get_attr(&bfad->bfa, &ioc_attr); - return snprintf(buf, PAGE_SIZE, "%d\n", ioc_attr.adapter_attr.nports); + return snprintf(buf, PAGE_SIZE, "%d\n", + bfa_get_nports(&bfad->bfa)); } static ssize_t diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index 94f4d84c71c9..8617a1aa8b25 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -139,6 +139,18 @@ struct bfad_cfg_param_s { u32 binding_method; }; +union bfad_tmp_buf { + /* From struct bfa_adapter_attr_s */ + char manufacturer[BFA_ADAPTER_MFG_NAME_LEN]; + char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; + char model[BFA_ADAPTER_MODEL_NAME_LEN]; + char fw_ver[BFA_VERSION_LEN]; + char optrom_ver[BFA_VERSION_LEN]; + + /* From struct bfa_ioc_pci_attr_s */ + u8 chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */ +}; + /* * BFAD (PCI function) data structure */ @@ -182,6 +194,7 @@ struct bfad_s { struct bfa_plog_s plog_buf; int ref_count; bfa_boolean_t ipfc_enabled; + union bfad_tmp_buf tmp_buf; struct fc_host_statistics link_stats; struct kobject *bfa_kobj; diff 
--git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 23390b40b9c3..cee3d89d1412 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -167,17 +167,15 @@ bfad_im_info(struct Scsi_Host *shost) static char bfa_buf[256]; struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) shost->hostdata[0]; - struct bfa_ioc_attr_s ioc_attr; struct bfad_s *bfad = im_port->bfad; + char model[BFA_ADAPTER_MODEL_NAME_LEN]; - memset(&ioc_attr, 0, sizeof(ioc_attr)); - bfa_get_attr(&bfad->bfa, &ioc_attr); + bfa_get_adapter_model(&bfad->bfa, model); memset(bfa_buf, 0, sizeof(bfa_buf)); snprintf(bfa_buf, sizeof(bfa_buf), - "Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s", - ioc_attr.adapter_attr.model, bfad->pci_name, - BFAD_DRIVER_VERSION); + "Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s", + model, bfad->pci_name, BFAD_DRIVER_VERSION); return bfa_buf; } @@ -931,10 +929,9 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port) struct Scsi_Host *host = im_port->shost; struct bfad_s *bfad = im_port->bfad; struct bfad_port_s *port = im_port->port; - union attr { - struct bfa_pport_attr_s pattr; - struct bfa_ioc_attr_s ioc_attr; - } attr; + struct bfa_pport_attr_s pattr; + char model[BFA_ADAPTER_MODEL_NAME_LEN]; + char fw_ver[BFA_VERSION_LEN]; fc_host_node_name(host) = bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port))); @@ -954,20 +951,18 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port) /* For fibre channel services type 0x20 */ fc_host_supported_fc4s(host)[7] = 1; - memset(&attr.ioc_attr, 0, sizeof(attr.ioc_attr)); - bfa_get_attr(&bfad->bfa, &attr.ioc_attr); + bfa_get_adapter_model(&bfad->bfa, model); + bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); sprintf(fc_host_symbolic_name(host), "Brocade %s FV%s DV%s", - attr.ioc_attr.adapter_attr.model, - attr.ioc_attr.adapter_attr.fw_ver, BFAD_DRIVER_VERSION); + model, fw_ver, BFAD_DRIVER_VERSION); fc_host_supported_speeds(host) = 0; fc_host_supported_speeds(host) |= FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; - memset(&attr.pattr, 0, sizeof(attr.pattr)); - bfa_fcport_get_attr(&bfad->bfa, &attr.pattr); - fc_host_maxframe_size(host) = attr.pattr.pport_cfg.maxfrsize; + bfa_fcport_get_attr(&bfad->bfa, &pattr); + fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize; } static void diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c index e1a4b312e9d4..b4e05ad1b47e 100644 --- a/drivers/scsi/bfa/fabric.c +++ b/drivers/scsi/bfa/fabric.c @@ -1235,14 +1235,8 @@ bfa_fcs_fabric_aen_post(struct bfa_fcs_port_s *port, wwn2str(pwwn_ptr, pwwn); wwn2str(fwwn_ptr, fwwn); - switch (event) { - case BFA_PORT_AEN_FABRIC_NAME_CHANGE: - bfa_log(logmod, BFA_AEN_PORT_FABRIC_NAME_CHANGE, pwwn_ptr, - fwwn_ptr); - break; - default: - break; - } + bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event), + pwwn_ptr, fwwn_ptr); aen_data.port.pwwn = pwwn; aen_data.port.fwwn = fwwn; diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/fcpim.c index 71d23d19bc9d..ef50ec2a1950 100644 --- a/drivers/scsi/bfa/fcpim.c +++ b/drivers/scsi/bfa/fcpim.c @@ -385,19 +385,8 @@ bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim, wwn2str(lpwwn_ptr, lpwwn); wwn2str(rpwwn_ptr, rpwwn); - switch (event) { - case BFA_ITNIM_AEN_ONLINE: - bfa_log(logmod, BFA_AEN_ITNIM_ONLINE, rpwwn_ptr, lpwwn_ptr); - break; - case BFA_ITNIM_AEN_OFFLINE: - bfa_log(logmod, BFA_AEN_ITNIM_OFFLINE, rpwwn_ptr, lpwwn_ptr); - break; - case BFA_ITNIM_AEN_DISCONNECT: - bfa_log(logmod, BFA_AEN_ITNIM_DISCONNECT, 
rpwwn_ptr, lpwwn_ptr); - break; - default: - break; - } + bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, event), + rpwwn_ptr, lpwwn_ptr); aen_data.itnim.vf_id = rport->port->fabric->vf_id; aen_data.itnim.ppwwn = diff --git a/drivers/scsi/bfa/include/bfa.h b/drivers/scsi/bfa/include/bfa.h index 17654cae0c75..1f5966cfbd16 100644 --- a/drivers/scsi/bfa/include/bfa.h +++ b/drivers/scsi/bfa/include/bfa.h @@ -106,6 +106,26 @@ struct bfa_sge_s { bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats) #define bfa_ioc_clear_stats(__bfa) \ bfa_ioc_clr_stats(&(__bfa)->ioc) +#define bfa_get_nports(__bfa) \ + bfa_ioc_get_nports(&(__bfa)->ioc) +#define bfa_get_adapter_manufacturer(__bfa, __manufacturer) \ + bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer) +#define bfa_get_adapter_model(__bfa, __model) \ + bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model) +#define bfa_get_adapter_serial_num(__bfa, __serial_num) \ + bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num) +#define bfa_get_adapter_fw_ver(__bfa, __fw_ver) \ + bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver) +#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver) \ + bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver) +#define bfa_get_pci_chip_rev(__bfa, __chip_rev) \ + bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev) +#define bfa_get_ioc_state(__bfa) \ + bfa_ioc_get_state(&(__bfa)->ioc) +#define bfa_get_type(__bfa) \ + bfa_ioc_get_type(&(__bfa)->ioc) +#define bfa_get_mac(__bfa) \ + bfa_ioc_get_mac(&(__bfa)->ioc) /* * bfa API functions diff --git a/drivers/scsi/bfa/include/cs/bfa_log.h b/drivers/scsi/bfa/include/cs/bfa_log.h index 761cbe22130a..bc334e0a93fa 100644 --- a/drivers/scsi/bfa/include/cs/bfa_log.h +++ b/drivers/scsi/bfa/include/cs/bfa_log.h @@ -157,7 +157,7 @@ typedef void (*bfa_log_cb_t)(struct bfa_log_mod_s *log_mod, u32 msg_id, struct bfa_log_mod_s { - char instance_info[16]; /* instance info */ + char instance_info[BFA_STRING_32]; /* instance info */ int log_level[BFA_LOG_MODULE_ID_MAX + 1]; /* log level for modules */ bfa_log_cb_t cbfn; /* callback function */ diff --git a/drivers/scsi/bfa/rport.c b/drivers/scsi/bfa/rport.c index 8c5969c86934..8e73dd9a625a 100644 --- a/drivers/scsi/bfa/rport.c +++ b/drivers/scsi/bfa/rport.c @@ -2039,13 +2039,10 @@ bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport, switch (event) { case BFA_RPORT_AEN_ONLINE: - bfa_log(logmod, BFA_AEN_RPORT_ONLINE, rpwwn_ptr, lpwwn_ptr); - break; case BFA_RPORT_AEN_OFFLINE: - bfa_log(logmod, BFA_AEN_RPORT_OFFLINE, rpwwn_ptr, lpwwn_ptr); - break; case BFA_RPORT_AEN_DISCONNECT: - bfa_log(logmod, BFA_AEN_RPORT_DISCONNECT, rpwwn_ptr, lpwwn_ptr); + bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, event), + rpwwn_ptr, lpwwn_ptr); break; case BFA_RPORT_AEN_QOS_PRIO: aen_data.rport.priv.qos = data->priv.qos; diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c index c5e534eb1620..27cd619a227a 100644 --- a/drivers/scsi/bfa/vport.c +++ b/drivers/scsi/bfa/vport.c @@ -447,22 +447,8 @@ bfa_fcs_vport_aen_post(bfa_fcs_lport_t *port, enum bfa_lport_aen_event event) bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX); - switch (event) { - case BFA_LPORT_AEN_NPIV_DUP_WWN: - bfa_log(logmod, BFA_AEN_LPORT_NPIV_DUP_WWN, lpwwn_ptr, - role_str[role / 2]); - break; - case BFA_LPORT_AEN_NPIV_FABRIC_MAX: - bfa_log(logmod, BFA_AEN_LPORT_NPIV_FABRIC_MAX, lpwwn_ptr, - role_str[role / 2]); - break; - case BFA_LPORT_AEN_NPIV_UNKNOWN: - bfa_log(logmod, BFA_AEN_LPORT_NPIV_UNKNOWN, lpwwn_ptr, - role_str[role / 2]); - break; - default: - break; - } + 
bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr, + role_str[role/2]); aen_data.lport.vf_id = port->fabric->vf_id; aen_data.lport.roles = role; -- cgit v1.2.3 From ca8b4327e405820966971236224db0e0724b5673 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:38:07 -0800 Subject: [SCSI] bfa: Modified the portstats get/clear logic Modified the portstats get/clear logic for port physical/FCoE/QoS stats. Added more stats to FC Fixed some issues with FCoE stats collection. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_fcport.c | 732 +++++++++---------------- drivers/scsi/bfa/bfa_port_priv.h | 41 +- drivers/scsi/bfa/bfad_attr.c | 5 +- drivers/scsi/bfa/include/bfa_svc.h | 24 +- drivers/scsi/bfa/include/bfi/bfi.h | 4 +- drivers/scsi/bfa/include/bfi/bfi_pport.h | 177 ++---- drivers/scsi/bfa/include/defs/bfa_defs_pport.h | 128 ++--- 7 files changed, 405 insertions(+), 706 deletions(-) diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c index 0da612010787..d109e651b1c5 100644 --- a/drivers/scsi/bfa/bfa_fcport.c +++ b/drivers/scsi/bfa/bfa_fcport.c @@ -47,16 +47,10 @@ static void bfa_fcport_callback(struct bfa_fcport_s *fcport, enum bfa_pport_linkstate event); static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_pport_linkstate event); -static void __bfa_cb_fcport_stats(void *cbarg, bfa_boolean_t complete); static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete); -static void bfa_fcport_stats_timeout(void *cbarg); +static void bfa_fcport_stats_get_timeout(void *cbarg); static void bfa_fcport_stats_clr_timeout(void *cbarg); -static void __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete); -static void __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete); -static void bfa_port_stats_timeout(void *cbarg); -static void bfa_port_stats_clr_timeout(void *cbarg); - /** * bfa_pport_private */ @@ -303,7 +297,7 @@ static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event) { - struct bfi_pport_event_s *pevent = fcport->event_arg.i2hmsg.event; + struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; bfa_trc(fcport->bfa, event); switch (event) { @@ -819,8 +813,32 @@ __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete) bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); } -#define PPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \ - BFA_CACHELINE_SZ)) +static void +bfa_fcport_callback(struct bfa_fcport_s *fcport, enum bfa_pport_linkstate event) +{ + if (fcport->bfa->fcs) { + fcport->event_cbfn(fcport->event_cbarg, event); + return; + } + + switch (event) { + case BFA_PPORT_LINKUP: + bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP); + break; + case BFA_PPORT_LINKDOWN: + bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN); + break; + default: + bfa_assert(0); + } +} + +static void +bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_pport_linkstate event) +{ + ln->ln_event = event; + bfa_cb_queue(ln->fcport->bfa, &ln->ln_qe, __bfa_cb_fcport_event, ln); +} #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \ BFA_CACHELINE_SZ)) @@ -829,8 +847,7 @@ static void bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len) { - *dm_len += PPORT_STATS_DMA_SZ; - *dm_len += PPORT_STATS_DMA_SZ; + *dm_len += FCPORT_STATS_DMA_SZ; } static void @@ -852,16 +869,7 @@ bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo) 
fcport->stats_kva = dm_kva; fcport->stats_pa = dm_pa; - fcport->stats = (union bfa_pport_stats_u *)dm_kva; - - dm_kva += PPORT_STATS_DMA_SZ; - dm_pa += PPORT_STATS_DMA_SZ; - - /* FC port stats */ - - fcport->fcport_stats_kva = dm_kva; - fcport->fcport_stats_pa = dm_pa; - fcport->fcport_stats = (union bfa_fcport_stats_u *) dm_kva; + fcport->stats = (union bfa_fcport_stats_u *)dm_kva; dm_kva += FCPORT_STATS_DMA_SZ; dm_pa += FCPORT_STATS_DMA_SZ; @@ -957,7 +965,7 @@ bfa_fcport_iocdisable(struct bfa_s *bfa) static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) { - struct bfi_pport_event_s *pevent = fcport->event_arg.i2hmsg.event; + struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event; fcport->speed = pevent->link_state.speed; fcport->topology = pevent->link_state.topology; @@ -989,7 +997,7 @@ bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport) static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport) { - struct bfi_pport_enable_req_s *m; + struct bfi_fcport_enable_req_s *m; /** * Increment message tag before queue check, so that responses to old @@ -1007,15 +1015,14 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport) return BFA_FALSE; } - bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_ENABLE_REQ, - bfa_lpuid(fcport->bfa)); + bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ, + bfa_lpuid(fcport->bfa)); m->nwwn = fcport->nwwn; m->pwwn = fcport->pwwn; m->port_cfg = fcport->cfg; m->msgtag = fcport->msgtag; m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize); bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa); - bfa_dma_be_addr_set(m->fcport_stats_dma_addr, fcport->fcport_stats_pa); bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); @@ -1032,7 +1039,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport) static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport) { - bfi_pport_disable_req_t *m; + struct bfi_fcport_req_s *m; /** * Increment message tag before queue check, so that responses to old @@ -1050,8 +1057,8 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport) return BFA_FALSE; } - bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_DISABLE_REQ, - bfa_lpuid(fcport->bfa)); + bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ, + bfa_lpuid(fcport->bfa)); m->msgtag = fcport->msgtag; /** @@ -1077,7 +1084,7 @@ bfa_fcport_send_txcredit(void *port_cbarg) { struct bfa_fcport_s *fcport = port_cbarg; - struct bfi_pport_set_svc_params_req_s *m; + struct bfi_fcport_set_svc_params_req_s *m; /** * check for room in queue to send request now @@ -1088,8 +1095,8 @@ bfa_fcport_send_txcredit(void *port_cbarg) return; } - bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_SET_SVC_PARAMS_REQ, - bfa_lpuid(fcport->bfa)); + bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ, + bfa_lpuid(fcport->bfa)); m->tx_bbcredit = bfa_os_htons((u16) fcport->cfg.tx_bbcredit); /** @@ -1098,7 +1105,158 @@ bfa_fcport_send_txcredit(void *port_cbarg) bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); } +static void +bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d, + struct bfa_qos_stats_s *s) +{ + u32 *dip = (u32 *) d; + u32 *sip = (u32 *) s; + int i; + + /* Now swap the 32 bit fields */ + for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i) + dip[i] = bfa_os_ntohl(sip[i]); +} + +static void +bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d, + struct bfa_fcoe_stats_s *s) +{ + u32 *dip = (u32 *) d; + u32 *sip = (u32 *) s; + int i; + + for (i = 0; i < 
((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32)); + i = i + 2) { +#ifdef __BIGENDIAN + dip[i] = bfa_os_ntohl(sip[i]); + dip[i + 1] = bfa_os_ntohl(sip[i + 1]); +#else + dip[i] = bfa_os_ntohl(sip[i + 1]); + dip[i + 1] = bfa_os_ntohl(sip[i]); +#endif + } +} + +static void +__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_fcport_s *fcport = cbarg; + + if (complete) { + if (fcport->stats_status == BFA_STATUS_OK) { + + /* Swap FC QoS or FCoE stats */ + if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) + bfa_fcport_qos_stats_swap( + &fcport->stats_ret->fcqos, + &fcport->stats->fcqos); + else + bfa_fcport_fcoe_stats_swap( + &fcport->stats_ret->fcoe, + &fcport->stats->fcoe); + } + fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); + } else { + fcport->stats_busy = BFA_FALSE; + fcport->stats_status = BFA_STATUS_OK; + } +} + +static void +bfa_fcport_stats_get_timeout(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + + bfa_trc(fcport->bfa, fcport->stats_qfull); + + if (fcport->stats_qfull) { + bfa_reqq_wcancel(&fcport->stats_reqq_wait); + fcport->stats_qfull = BFA_FALSE; + } + + fcport->stats_status = BFA_STATUS_ETIMER; + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get, + fcport); +} + +static void +bfa_fcport_send_stats_get(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + struct bfi_fcport_req_s *msg; + + msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); + + if (!msg) { + fcport->stats_qfull = BFA_TRUE; + bfa_reqq_winit(&fcport->stats_reqq_wait, + bfa_fcport_send_stats_get, fcport); + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->stats_reqq_wait); + return; + } + fcport->stats_qfull = BFA_FALSE; + + bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s)); + bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ, + bfa_lpuid(fcport->bfa)); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); +} + +static void +__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) +{ + struct bfa_fcport_s *fcport = cbarg; + + if (complete) { + fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); + } else { + fcport->stats_busy = BFA_FALSE; + fcport->stats_status = BFA_STATUS_OK; + } +} + +static void +bfa_fcport_stats_clr_timeout(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + + bfa_trc(fcport->bfa, fcport->stats_qfull); + + if (fcport->stats_qfull) { + bfa_reqq_wcancel(&fcport->stats_reqq_wait); + fcport->stats_qfull = BFA_FALSE; + } + + fcport->stats_status = BFA_STATUS_ETIMER; + bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, + __bfa_cb_fcport_stats_clr, fcport); +} + +static void +bfa_fcport_send_stats_clear(void *cbarg) +{ + struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; + struct bfi_fcport_req_s *msg; + msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); + + if (!msg) { + fcport->stats_qfull = BFA_TRUE; + bfa_reqq_winit(&fcport->stats_reqq_wait, + bfa_fcport_send_stats_clear, fcport); + bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, + &fcport->stats_reqq_wait); + return; + } + fcport->stats_qfull = BFA_FALSE; + + bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s)); + bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ, + bfa_lpuid(fcport->bfa)); + bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); +} /** * bfa_pport_public @@ -1111,23 +1269,23 @@ void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - union bfi_pport_i2h_msg_u i2hmsg; + union bfi_fcport_i2h_msg_u 
i2hmsg; i2hmsg.msg = msg; fcport->event_arg.i2hmsg = i2hmsg; switch (msg->mhdr.msg_id) { - case BFI_PPORT_I2H_ENABLE_RSP: - if (fcport->msgtag == i2hmsg.enable_rsp->msgtag) + case BFI_FCPORT_I2H_ENABLE_RSP: + if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); break; - case BFI_PPORT_I2H_DISABLE_RSP: - if (fcport->msgtag == i2hmsg.enable_rsp->msgtag) + case BFI_FCPORT_I2H_DISABLE_RSP: + if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); break; - case BFI_PPORT_I2H_EVENT: + case BFI_FCPORT_I2H_EVENT: switch (i2hmsg.event->link_state.linkstate) { case BFA_PPORT_LINKUP: bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP); @@ -1141,58 +1299,27 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) } break; - case BFI_PPORT_I2H_GET_STATS_RSP: - case BFI_PPORT_I2H_GET_QOS_STATS_RSP: - /* - * check for timer pop before processing the rsp - */ - if (fcport->stats_busy == BFA_FALSE - || fcport->stats_status == BFA_STATUS_ETIMER) - break; - - bfa_timer_stop(&fcport->timer); - fcport->stats_status = i2hmsg.getstats_rsp->status; - bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_port_stats, - fcport); - break; - case BFI_PPORT_I2H_CLEAR_STATS_RSP: - case BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP: - /* - * check for timer pop before processing the rsp - */ - if (fcport->stats_busy == BFA_FALSE - || fcport->stats_status == BFA_STATUS_ETIMER) - break; - - bfa_timer_stop(&fcport->timer); - fcport->stats_status = BFA_STATUS_OK; - bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, - __bfa_cb_port_stats_clr, fcport); - break; - - case BFI_FCPORT_I2H_GET_STATS_RSP: + case BFI_FCPORT_I2H_STATS_GET_RSP: /* * check for timer pop before processing the rsp */ if (fcport->stats_busy == BFA_FALSE || - fcport->stats_status == BFA_STATUS_ETIMER) { + fcport->stats_status == BFA_STATUS_ETIMER) break; - } bfa_timer_stop(&fcport->timer); - fcport->stats_status = i2hmsg.getstats_rsp->status; + fcport->stats_status = i2hmsg.pstatsget_rsp->status; bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, - __bfa_cb_fcport_stats, fcport); + __bfa_cb_fcport_stats_get, fcport); break; - case BFI_FCPORT_I2H_CLEAR_STATS_RSP: + case BFI_FCPORT_I2H_STATS_CLEAR_RSP: /* * check for timer pop before processing the rsp */ if (fcport->stats_busy == BFA_FALSE || - fcport->stats_status == BFA_STATUS_ETIMER) { + fcport->stats_status == BFA_STATUS_ETIMER) break; - } bfa_timer_stop(&fcport->timer); fcport->stats_status = BFA_STATUS_OK; @@ -1206,8 +1333,6 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) } } - - /** * bfa_pport_api */ @@ -1472,329 +1597,13 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr) attr->port_state = BFA_PPORT_ST_FWMISMATCH; } -static void -bfa_port_stats_query(void *cbarg) -{ - struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg; - bfi_pport_get_stats_req_t *msg; - - msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); - - if (!msg) { - fcport->stats_qfull = BFA_TRUE; - bfa_reqq_winit(&fcport->stats_reqq_wait, bfa_port_stats_query, - fcport); - bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, - &fcport->stats_reqq_wait); - return; - } - fcport->stats_qfull = BFA_FALSE; - - bfa_os_memset(msg, 0, sizeof(bfi_pport_get_stats_req_t)); - bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_GET_STATS_REQ, - bfa_lpuid(fcport->bfa)); - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); -} - -static void -bfa_port_stats_clear(void *cbarg) -{ - struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg; - bfi_pport_clear_stats_req_t *msg; - - 
msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); - - if (!msg) { - fcport->stats_qfull = BFA_TRUE; - bfa_reqq_winit(&fcport->stats_reqq_wait, bfa_port_stats_clear, - fcport); - bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, - &fcport->stats_reqq_wait); - return; - } - fcport->stats_qfull = BFA_FALSE; - - bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_stats_req_t)); - bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_STATS_REQ, - bfa_lpuid(fcport->bfa)); - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); -} - -static void -bfa_fcport_stats_query(void *cbarg) -{ - struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; - bfi_pport_get_stats_req_t *msg; - - msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); - - if (!msg) { - fcport->stats_qfull = BFA_TRUE; - bfa_reqq_winit(&fcport->stats_reqq_wait, - bfa_fcport_stats_query, fcport); - bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, - &fcport->stats_reqq_wait); - return; - } - fcport->stats_qfull = BFA_FALSE; - - bfa_os_memset(msg, 0, sizeof(bfi_pport_get_stats_req_t)); - bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_FCPORT_H2I_GET_STATS_REQ, - bfa_lpuid(fcport->bfa)); - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); -} - -static void -bfa_fcport_stats_clear(void *cbarg) -{ - struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; - bfi_pport_clear_stats_req_t *msg; - - msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); - - if (!msg) { - fcport->stats_qfull = BFA_TRUE; - bfa_reqq_winit(&fcport->stats_reqq_wait, - bfa_fcport_stats_clear, fcport); - bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, - &fcport->stats_reqq_wait); - return; - } - fcport->stats_qfull = BFA_FALSE; - - bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_stats_req_t)); - bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_FCPORT_H2I_CLEAR_STATS_REQ, - bfa_lpuid(fcport->bfa)); - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); -} - -static void -bfa_port_qos_stats_clear(void *cbarg) -{ - struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg; - bfi_pport_clear_qos_stats_req_t *msg; - - msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); - - if (!msg) { - fcport->stats_qfull = BFA_TRUE; - bfa_reqq_winit(&fcport->stats_reqq_wait, - bfa_port_qos_stats_clear, fcport); - bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT, - &fcport->stats_reqq_wait); - return; - } - fcport->stats_qfull = BFA_FALSE; - - bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_qos_stats_req_t)); - bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ, - bfa_lpuid(fcport->bfa)); - bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); -} - -static void -bfa_fcport_stats_swap(union bfa_fcport_stats_u *d, union bfa_fcport_stats_u *s) -{ - u32 *dip = (u32 *) d; - u32 *sip = (u32 *) s; - int i; - - /* Do 64 bit fields swap first */ - for (i = 0; i < ((sizeof(union bfa_fcport_stats_u) - - sizeof(struct bfa_qos_stats_s))/sizeof(u32)); i = i + 2) { -#ifdef __BIGENDIAN - dip[i] = bfa_os_ntohl(sip[i]); - dip[i + 1] = bfa_os_ntohl(sip[i + 1]); -#else - dip[i] = bfa_os_ntohl(sip[i + 1]); - dip[i + 1] = bfa_os_ntohl(sip[i]); -#endif - } - - /* Now swap the 32 bit fields */ - for (; i < (sizeof(union bfa_fcport_stats_u)/sizeof(u32)); ++i) - dip[i] = bfa_os_ntohl(sip[i]); -} - -static void -bfa_port_stats_swap(union bfa_pport_stats_u *d, union bfa_pport_stats_u *s) -{ - u32 *dip = (u32 *) d; - u32 *sip = (u32 *) s; - int i; - - /* Do 64 bit fields swap first */ - for (i = 0; i < (sizeof(union bfa_pport_stats_u) / sizeof(u32)); - i = i + 2) { -#ifdef __BIGENDIAN - dip[i] = bfa_os_ntohl(sip[i]); - dip[i + 1] = bfa_os_ntohl(sip[i + 1]); -#else - dip[i] = 
bfa_os_ntohl(sip[i + 1]); - dip[i + 1] = bfa_os_ntohl(sip[i]); -#endif - } -} - -static void -__bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_fcport_s *fcport = cbarg; - - if (complete) { - fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); - } else { - fcport->stats_busy = BFA_FALSE; - fcport->stats_status = BFA_STATUS_OK; - } -} - -static void -__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_fcport_s *fcport = cbarg; - - if (complete) { - fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); - } else { - fcport->stats_busy = BFA_FALSE; - fcport->stats_status = BFA_STATUS_OK; - } -} - -static void -bfa_port_stats_clr_timeout(void *cbarg) -{ - struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg; - - bfa_trc(fcport->bfa, fcport->stats_qfull); - - if (fcport->stats_qfull) { - bfa_reqq_wcancel(&fcport->stats_reqq_wait); - fcport->stats_qfull = BFA_FALSE; - } - - fcport->stats_status = BFA_STATUS_ETIMER; - bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, - __bfa_cb_port_stats_clr, fcport); -} - -static void -bfa_fcport_callback(struct bfa_fcport_s *fcport, enum bfa_pport_linkstate event) -{ - if (fcport->bfa->fcs) { - fcport->event_cbfn(fcport->event_cbarg, event); - return; - } - - switch (event) { - case BFA_PPORT_LINKUP: - bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP); - break; - case BFA_PPORT_LINKDOWN: - bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN); - break; - default: - bfa_assert(0); - } -} - -static void -bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_pport_linkstate event) -{ - ln->ln_event = event; - bfa_cb_queue(ln->fcport->bfa, &ln->ln_qe, __bfa_cb_fcport_event, ln); -} - -static void -bfa_fcport_stats_clr_timeout(void *cbarg) -{ - struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; - - bfa_trc(fcport->bfa, fcport->stats_qfull); - - if (fcport->stats_qfull) { - bfa_reqq_wcancel(&fcport->stats_reqq_wait); - fcport->stats_qfull = BFA_FALSE; - } - - fcport->stats_status = BFA_STATUS_ETIMER; - bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_clr, - fcport); -} - -static void -__bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_fcport_s *fcport = cbarg; - - if (complete) { - if (fcport->stats_status == BFA_STATUS_OK) - bfa_port_stats_swap(fcport->stats_ret, fcport->stats); - fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); - } else { - fcport->stats_busy = BFA_FALSE; - fcport->stats_status = BFA_STATUS_OK; - } -} - -static void -bfa_port_stats_timeout(void *cbarg) -{ - struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg; - - bfa_trc(fcport->bfa, fcport->stats_qfull); - - if (fcport->stats_qfull) { - bfa_reqq_wcancel(&fcport->stats_reqq_wait); - fcport->stats_qfull = BFA_FALSE; - } - - fcport->stats_status = BFA_STATUS_ETIMER; - bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_port_stats, fcport); -} - -static void -__bfa_cb_fcport_stats(void *cbarg, bfa_boolean_t complete) -{ - struct bfa_fcport_s *fcport = cbarg; - - if (complete) { - if (fcport->stats_status == BFA_STATUS_OK) { - bfa_fcport_stats_swap(fcport->fcport_stats_ret, - fcport->fcport_stats); - } - fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); - } else { - fcport->stats_busy = BFA_FALSE; - fcport->stats_status = BFA_STATUS_OK; - } -} - -static void -bfa_fcport_stats_timeout(void *cbarg) -{ - struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg; - - bfa_trc(fcport->bfa, fcport->stats_qfull); - - if (fcport->stats_qfull) { 
- bfa_reqq_wcancel(&fcport->stats_reqq_wait); - fcport->stats_qfull = BFA_FALSE; - } - - fcport->stats_status = BFA_STATUS_ETIMER; - bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats, - fcport); -} - -#define BFA_PORT_STATS_TOV 1000 +#define BFA_FCPORT_STATS_TOV 1000 /** - * Fetch port attributes. + * Fetch port attributes (FCQoS or FCoE). */ bfa_status_t -bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, +bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, bfa_cb_pport_t cbfn, void *cbarg) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); @@ -1804,20 +1613,23 @@ bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, return BFA_STATUS_DEVBUSY; } - fcport->stats_busy = BFA_TRUE; - fcport->stats_ret = stats; - fcport->stats_cbfn = cbfn; + fcport->stats_busy = BFA_TRUE; + fcport->stats_ret = stats; + fcport->stats_cbfn = cbfn; fcport->stats_cbarg = cbarg; - bfa_port_stats_query(fcport); + bfa_fcport_send_stats_get(fcport); - bfa_timer_start(bfa, &fcport->timer, bfa_port_stats_timeout, fcport, - BFA_PORT_STATS_TOV); + bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout, + fcport, BFA_FCPORT_STATS_TOV); return BFA_STATUS_OK; } +/** + * Reset port statistics (FCQoS or FCoE). + */ bfa_status_t -bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) +bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); @@ -1830,65 +1642,61 @@ bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) fcport->stats_cbfn = cbfn; fcport->stats_cbarg = cbarg; - bfa_port_stats_clear(fcport); + bfa_fcport_send_stats_clear(fcport); - bfa_timer_start(bfa, &fcport->timer, bfa_port_stats_clr_timeout, - fcport, BFA_PORT_STATS_TOV); + bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout, + fcport, BFA_FCPORT_STATS_TOV); return BFA_STATUS_OK; } /** - * @brief - * Fetch FCPort statistics. - * Todo TBD: sharing timer,stats_busy and other resources of fcport for now - - * ideally we want to create seperate ones for fcport once bfa_fcport_s is - * decided. 
- * + * Fetch FCQoS port statistics */ bfa_status_t -bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, - bfa_cb_pport_t cbfn, void *cbarg) +bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, + bfa_cb_pport_t cbfn, void *cbarg) { - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - if (fcport->stats_busy) { - bfa_trc(bfa, fcport->stats_busy); - return BFA_STATUS_DEVBUSY; - } - - fcport->stats_busy = BFA_TRUE; - fcport->fcport_stats_ret = stats; - fcport->stats_cbfn = cbfn; - fcport->stats_cbarg = cbarg; + /* Meaningful only for FC mode */ + bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc)); - bfa_fcport_stats_query(fcport); - - bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_timeout, fcport, - BFA_PORT_STATS_TOV); - - return BFA_STATUS_OK; + return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); } +/** + * Reset FCoE port statistics + */ bfa_status_t -bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) +bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) { - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); + /* Meaningful only for FC mode */ + bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc)); - if (fcport->stats_busy) { - bfa_trc(bfa, fcport->stats_busy); - return BFA_STATUS_DEVBUSY; - } + return bfa_fcport_clear_stats(bfa, cbfn, cbarg); +} - fcport->stats_busy = BFA_TRUE; - fcport->stats_cbfn = cbfn; - fcport->stats_cbarg = cbarg; +/** + * Fetch FCQoS port statistics + */ +bfa_status_t +bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, + bfa_cb_pport_t cbfn, void *cbarg) +{ + /* Meaningful only for FCoE mode */ + bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc)); - bfa_fcport_stats_clear(fcport); + return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); +} - bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout, - fcport, BFA_PORT_STATS_TOV); +/** + * Reset FCoE port statistics + */ +bfa_status_t +bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) +{ + /* Meaningful only for FCoE mode */ + bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc)); - return BFA_STATUS_OK; + return bfa_fcport_clear_stats(bfa, cbfn, cbarg); } bfa_status_t @@ -1945,40 +1753,6 @@ bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa, } } -/** - * Fetch QoS Stats. - */ -bfa_status_t -bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats, - bfa_cb_pport_t cbfn, void *cbarg) -{ - /* - * QoS stats is embedded in port stats - */ - return bfa_pport_get_stats(bfa, stats, cbfn, cbarg); -} - -bfa_status_t -bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg) -{ - struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - - if (fcport->stats_busy) { - bfa_trc(bfa, fcport->stats_busy); - return BFA_STATUS_DEVBUSY; - } - - fcport->stats_busy = BFA_TRUE; - fcport->stats_cbfn = cbfn; - fcport->stats_cbarg = cbarg; - - bfa_port_qos_stats_clear(fcport); - - bfa_timer_start(bfa, &fcport->timer, bfa_port_stats_clr_timeout, - fcport, BFA_PORT_STATS_TOV); - return BFA_STATUS_OK; -} - /** * Fetch port attributes. 
*/ diff --git a/drivers/scsi/bfa/bfa_port_priv.h b/drivers/scsi/bfa/bfa_port_priv.h index 6d315ffb1e99..40e256ec67ff 100644 --- a/drivers/scsi/bfa/bfa_port_priv.h +++ b/drivers/scsi/bfa/bfa_port_priv.h @@ -46,6 +46,8 @@ struct bfa_fcport_s { enum bfa_pport_topology topology; /* current topology */ u8 myalpa; /* my ALPA in LOOP topology */ u8 rsvd[3]; + u32 mypid:24; + u32 rsvd_b:8; struct bfa_pport_cfg_s cfg; /* current port configuration */ struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ @@ -59,40 +61,25 @@ struct bfa_fcport_s { void (*event_cbfn) (void *cbarg, bfa_pport_event_t event); union { - union bfi_pport_i2h_msg_u i2hmsg; + union bfi_fcport_i2h_msg_u i2hmsg; } event_arg; void *bfad; /* BFA driver handle */ struct bfa_fcport_ln_s ln; /* Link Notification */ struct bfa_cb_qe_s hcb_qe; /* BFA callback queue elem */ + struct bfa_timer_s timer; /* timer */ u32 msgtag; /* fimrware msg tag for reply */ u8 *stats_kva; u64 stats_pa; - union bfa_pport_stats_u *stats; /* pport stats */ - u32 mypid:24; - u32 rsvd_b:8; - struct bfa_timer_s timer; /* timer */ - union bfa_pport_stats_u *stats_ret; - /* driver stats location */ - bfa_status_t stats_status; - /* stats/statsclr status */ - bfa_boolean_t stats_busy; - /* outstanding stats/statsclr */ - bfa_boolean_t stats_qfull; - bfa_boolean_t diag_busy; - /* diag busy status */ - bfa_boolean_t beacon; - /* port beacon status */ - bfa_boolean_t link_e2e_beacon; - /* link beacon status */ - bfa_cb_pport_t stats_cbfn; - /* driver callback function */ - void *stats_cbarg; - /* *!< user callback arg */ - /* FCport stats */ - u8 *fcport_stats_kva; - u64 fcport_stats_pa; - union bfa_fcport_stats_u *fcport_stats; - union bfa_fcport_stats_u *fcport_stats_ret; + union bfa_fcport_stats_u *stats; + union bfa_fcport_stats_u *stats_ret; /* driver stats location */ + bfa_status_t stats_status; /* stats/statsclr status */ + bfa_boolean_t stats_busy; /* outstanding stats/statsclr */ + bfa_boolean_t stats_qfull; + bfa_cb_pport_t stats_cbfn; /* driver callback function */ + void *stats_cbarg; /* *!< user callback arg */ + bfa_boolean_t diag_busy; /* diag busy status */ + bfa_boolean_t beacon; /* port beacon status */ + bfa_boolean_t link_e2e_beacon; /* link beacon status */ }; #define BFA_FCPORT_MOD(__bfa) (&(__bfa)->modules.fcport) diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index dd5cb20d4b37..d97f69191838 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -288,7 +288,7 @@ bfad_im_get_stats(struct Scsi_Host *shost) init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); memset(hstats, 0, sizeof(struct fc_host_statistics)); - rc = bfa_pport_get_stats(&bfad->bfa, + rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa), (union bfa_pport_stats_u *) hstats, bfad_hcb_comp, &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); @@ -315,7 +315,8 @@ bfad_im_reset_stats(struct Scsi_Host *shost) init_completion(&fcomp.comp); spin_lock_irqsave(&bfad->bfad_lock, flags); - rc = bfa_pport_clear_stats(&bfad->bfa, bfad_hcb_comp, &fcomp); + rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp, + &fcomp); spin_unlock_irqrestore(&bfad->bfad_lock, flags); if (rc != BFA_STATUS_OK) diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h index f2c30858900b..1349b99a3c6d 100644 --- a/drivers/scsi/bfa/include/bfa_svc.h +++ b/drivers/scsi/bfa/include/bfa_svc.h @@ -36,7 +36,7 @@ struct bfa_fcxp_s; struct 
bfa_rport_info_s { u16 max_frmsz; /* max rcv pdu size */ u32 pid:24, /* remote port ID */ - lp_tag:8; + lp_tag:8; /* tag */ u32 local_pid:24, /* local port ID */ cisc:8; /* CIRO supported */ u8 fc_class; /* supported FC classes. enum fc_cos */ @@ -55,7 +55,7 @@ struct bfa_rport_s { void *rport_drv; /* fcs/driver rport object */ u16 fw_handle; /* firmware rport handle */ u16 rport_tag; /* BFA rport tag */ - struct bfa_rport_info_s rport_info; /* rport info from *fcs/driver */ + struct bfa_rport_info_s rport_info; /* rport info from fcs/driver */ struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ struct bfa_cb_qe_s hcb_qe; /* BFA callback qelem */ struct bfa_rport_hal_stats_s stats; /* BFA rport statistics */ @@ -102,7 +102,7 @@ struct bfa_uf_buf_s { struct bfa_uf_s { struct list_head qe; /* queue element */ struct bfa_s *bfa; /* bfa instance */ - u16 uf_tag; /* identifying tag f/w messages */ + u16 uf_tag; /* identifying tag fw msgs */ u16 vf_id; u16 src_rport_handle; u16 rsvd; @@ -128,7 +128,7 @@ struct bfa_lps_s { u8 reqq; /* lport request queue */ u8 alpa; /* ALPA for loop topologies */ u32 lp_pid; /* lport port ID */ - bfa_boolean_t fdisc; /* send FDISC instead of FLOGI*/ + bfa_boolean_t fdisc; /* send FDISC instead of FLOGI */ bfa_boolean_t auth_en; /* enable authentication */ bfa_boolean_t auth_req; /* authentication required */ bfa_boolean_t npiv_en; /* NPIV is allowed by peer */ @@ -178,11 +178,6 @@ bfa_status_t bfa_fcport_trunk_disable(struct bfa_s *bfa); bfa_boolean_t bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap); void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr); wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node); -bfa_status_t bfa_pport_get_stats(struct bfa_s *bfa, - union bfa_pport_stats_u *stats, - bfa_cb_pport_t cbfn, void *cbarg); -bfa_status_t bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, - void *cbarg); void bfa_fcport_event_register(struct bfa_s *bfa, void (*event_cbfn) (void *cbarg, bfa_pport_event_t event), void *event_cbarg); @@ -198,14 +193,21 @@ void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status); void bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon, bfa_boolean_t link_e2e_beacon); void bfa_cb_pport_event(void *cbarg, bfa_pport_event_t event); -void bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr); +void bfa_fcport_qos_get_attr(struct bfa_s *bfa, + struct bfa_qos_attr_s *qos_attr); void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa, struct bfa_qos_vc_attr_s *qos_vc_attr); bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa, - union bfa_pport_stats_u *stats, + union bfa_fcport_stats_u *stats, bfa_cb_pport_t cbfn, void *cbarg); bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg); +bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, + union bfa_fcport_stats_u *stats, + bfa_cb_pport_t cbfn, void *cbarg); +bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, + void *cbarg); + bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa); bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa); bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa, diff --git a/drivers/scsi/bfa/include/bfi/bfi.h b/drivers/scsi/bfa/include/bfi/bfi.h index 7042c18e542d..a550e80cabd2 100644 --- a/drivers/scsi/bfa/include/bfi/bfi.h +++ b/drivers/scsi/bfa/include/bfi/bfi.h @@ -143,8 +143,8 @@ enum bfi_mclass { BFI_MC_IOC = 1, /* IO Controller (IOC) */ BFI_MC_DIAG = 2, /* Diagnostic Msgs */ BFI_MC_FLASH = 3, 
/* Flash message class */ - BFI_MC_CEE = 4, - BFI_MC_FC_PORT = 5, /* FC port */ + BFI_MC_CEE = 4, /* CEE */ + BFI_MC_FCPORT = 5, /* FC port */ BFI_MC_IOCFC = 6, /* FC - IO Controller (IOC) */ BFI_MC_LL = 7, /* Link Layer */ BFI_MC_UF = 8, /* Unsolicited frame receive */ diff --git a/drivers/scsi/bfa/include/bfi/bfi_pport.h b/drivers/scsi/bfa/include/bfi/bfi_pport.h index 5c3d289d986d..50dcf45c7470 100644 --- a/drivers/scsi/bfa/include/bfi/bfi_pport.h +++ b/drivers/scsi/bfa/include/bfi/bfi_pport.h @@ -22,168 +22,97 @@ #pragma pack(1) -enum bfi_pport_h2i { - BFI_PPORT_H2I_ENABLE_REQ = (1), - BFI_PPORT_H2I_DISABLE_REQ = (2), - BFI_PPORT_H2I_GET_STATS_REQ = (3), - BFI_PPORT_H2I_CLEAR_STATS_REQ = (4), - BFI_PPORT_H2I_SET_SVC_PARAMS_REQ = (5), - BFI_PPORT_H2I_ENABLE_RX_VF_TAG_REQ = (6), - BFI_PPORT_H2I_ENABLE_TX_VF_TAG_REQ = (7), - BFI_PPORT_H2I_GET_QOS_STATS_REQ = (8), - BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ = (9), - BFI_FCPORT_H2I_GET_STATS_REQ = (10), - BFI_FCPORT_H2I_CLEAR_STATS_REQ = (11), +enum bfi_fcport_h2i { + BFI_FCPORT_H2I_ENABLE_REQ = (1), + BFI_FCPORT_H2I_DISABLE_REQ = (2), + BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ = (3), + BFI_FCPORT_H2I_STATS_GET_REQ = (4), + BFI_FCPORT_H2I_STATS_CLEAR_REQ = (5), }; -enum bfi_pport_i2h { - BFI_PPORT_I2H_ENABLE_RSP = BFA_I2HM(1), - BFI_PPORT_I2H_DISABLE_RSP = BFA_I2HM(2), - BFI_PPORT_I2H_GET_STATS_RSP = BFA_I2HM(3), - BFI_PPORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), - BFI_PPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(5), - BFI_PPORT_I2H_ENABLE_RX_VF_TAG_RSP = BFA_I2HM(6), - BFI_PPORT_I2H_ENABLE_TX_VF_TAG_RSP = BFA_I2HM(7), - BFI_PPORT_I2H_EVENT = BFA_I2HM(8), - BFI_PPORT_I2H_GET_QOS_STATS_RSP = BFA_I2HM(9), - BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP = BFA_I2HM(10), - BFI_FCPORT_I2H_GET_STATS_RSP = BFA_I2HM(11), - BFI_FCPORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(12), +enum bfi_fcport_i2h { + BFI_FCPORT_I2H_ENABLE_RSP = BFA_I2HM(1), + BFI_FCPORT_I2H_DISABLE_RSP = BFA_I2HM(2), + BFI_FCPORT_I2H_SET_SVC_PARAMS_RSP = BFA_I2HM(3), + BFI_FCPORT_I2H_STATS_GET_RSP = BFA_I2HM(4), + BFI_FCPORT_I2H_STATS_CLEAR_RSP = BFA_I2HM(5), + BFI_FCPORT_I2H_EVENT = BFA_I2HM(6), }; /** * Generic REQ type */ -struct bfi_pport_generic_req_s { +struct bfi_fcport_req_s { struct bfi_mhdr_s mh; /* msg header */ - u32 msgtag; /* msgtag for reply */ + u32 msgtag; /* msgtag for reply */ }; /** * Generic RSP type */ -struct bfi_pport_generic_rsp_s { +struct bfi_fcport_rsp_s { struct bfi_mhdr_s mh; /* common msg header */ - u8 status; /* port enable status */ - u8 rsvd[3]; - u32 msgtag; /* msgtag for reply */ + u8 status; /* port enable status */ + u8 rsvd[3]; + u32 msgtag; /* msgtag for reply */ }; /** - * BFI_PPORT_H2I_ENABLE_REQ + * BFI_FCPORT_H2I_ENABLE_REQ */ -struct bfi_pport_enable_req_s { +struct bfi_fcport_enable_req_s { struct bfi_mhdr_s mh; /* msg header */ - u32 rsvd1; - wwn_t nwwn; /* node wwn of physical port */ - wwn_t pwwn; /* port wwn of physical port */ - struct bfa_pport_cfg_s port_cfg; /* port configuration */ - union bfi_addr_u stats_dma_addr; /* DMA address for stats */ - union bfi_addr_u fcport_stats_dma_addr;/*!< DMA address for stats */ - u32 msgtag; /* msgtag for reply */ - u32 rsvd2; + u32 rsvd1; + wwn_t nwwn; /* node wwn of physical port */ + wwn_t pwwn; /* port wwn of physical port */ + struct bfa_pport_cfg_s port_cfg; /* port configuration */ + union bfi_addr_u stats_dma_addr; /* DMA address for stats */ + u32 msgtag; /* msgtag for reply */ + u32 rsvd2; }; /** - * BFI_PPORT_I2H_ENABLE_RSP + * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ */ -#define bfi_pport_enable_rsp_t struct 
bfi_pport_generic_rsp_s - -/** - * BFI_PPORT_H2I_DISABLE_REQ - */ -#define bfi_pport_disable_req_t struct bfi_pport_generic_req_s - -/** - * BFI_PPORT_I2H_DISABLE_RSP - */ -#define bfi_pport_disable_rsp_t struct bfi_pport_generic_rsp_s - -/** - * BFI_PPORT_H2I_GET_STATS_REQ - */ -#define bfi_pport_get_stats_req_t struct bfi_pport_generic_req_s - -/** - * BFI_PPORT_I2H_GET_STATS_RSP - */ -#define bfi_pport_get_stats_rsp_t struct bfi_pport_generic_rsp_s - -/** - * BFI_PPORT_H2I_CLEAR_STATS_REQ - */ -#define bfi_pport_clear_stats_req_t struct bfi_pport_generic_req_s - -/** - * BFI_PPORT_I2H_CLEAR_STATS_RSP - */ -#define bfi_pport_clear_stats_rsp_t struct bfi_pport_generic_rsp_s - -/** - * BFI_PPORT_H2I_GET_QOS_STATS_REQ - */ -#define bfi_pport_get_qos_stats_req_t struct bfi_pport_generic_req_s - -/** - * BFI_PPORT_H2I_GET_QOS_STATS_RSP - */ -#define bfi_pport_get_qos_stats_rsp_t struct bfi_pport_generic_rsp_s - -/** - * BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ - */ -#define bfi_pport_clear_qos_stats_req_t struct bfi_pport_generic_req_s - -/** - * BFI_PPORT_H2I_CLEAR_QOS_STATS_RSP - */ -#define bfi_pport_clear_qos_stats_rsp_t struct bfi_pport_generic_rsp_s - -/** - * BFI_PPORT_H2I_SET_SVC_PARAMS_REQ - */ -struct bfi_pport_set_svc_params_req_s { +struct bfi_fcport_set_svc_params_req_s { struct bfi_mhdr_s mh; /* msg header */ - u16 tx_bbcredit; /* Tx credits */ - u16 rsvd; + u16 tx_bbcredit; /* Tx credits */ + u16 rsvd; }; /** - * BFI_PPORT_I2H_SET_SVC_PARAMS_RSP - */ - -/** - * BFI_PPORT_I2H_EVENT + * BFI_FCPORT_I2H_EVENT */ -struct bfi_pport_event_s { +struct bfi_fcport_event_s { struct bfi_mhdr_s mh; /* common msg header */ struct bfa_pport_link_s link_state; }; -union bfi_pport_h2i_msg_u { +/** + * fcport H2I message + */ +union bfi_fcport_h2i_msg_u { struct bfi_mhdr_s *mhdr; - struct bfi_pport_enable_req_s *penable; - struct bfi_pport_generic_req_s *pdisable; - struct bfi_pport_generic_req_s *pgetstats; - struct bfi_pport_generic_req_s *pclearstats; - struct bfi_pport_set_svc_params_req_s *psetsvcparams; - struct bfi_pport_get_qos_stats_req_s *pgetqosstats; - struct bfi_pport_generic_req_s *pclearqosstats; + struct bfi_fcport_enable_req_s *penable; + struct bfi_fcport_req_s *pdisable; + struct bfi_fcport_set_svc_params_req_s *psetsvcparams; + struct bfi_fcport_req_s *pstatsget; + struct bfi_fcport_req_s *pstatsclear; }; -union bfi_pport_i2h_msg_u { +/** + * fcport I2H message + */ +union bfi_fcport_i2h_msg_u { struct bfi_msg_s *msg; - struct bfi_pport_generic_rsp_s *enable_rsp; - struct bfi_pport_disable_rsp_s *disable_rsp; - struct bfi_pport_generic_rsp_s *getstats_rsp; - struct bfi_pport_clear_stats_rsp_s *clearstats_rsp; - struct bfi_pport_set_svc_params_rsp_s *setsvcparasm_rsp; - struct bfi_pport_get_qos_stats_rsp_s *getqosstats_rsp; - struct bfi_pport_clear_qos_stats_rsp_s *clearqosstats_rsp; - struct bfi_pport_event_s *event; + struct bfi_fcport_rsp_s *penable_rsp; + struct bfi_fcport_rsp_s *pdisable_rsp; + struct bfi_fcport_rsp_s *psetsvcparams_rsp; + struct bfi_fcport_rsp_s *pstatsget_rsp; + struct bfi_fcport_rsp_s *pstatsclear_rsp; + struct bfi_fcport_event_s *event; }; #pragma pack() #endif /* __BFI_PPORT_H__ */ - diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h index 164cfbef9b12..26e5cc78095d 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h @@ -240,73 +240,79 @@ struct bfa_pport_attr_s { * FC Port statistics. 
*/ struct bfa_pport_fc_stats_s { - u64 secs_reset; /* seconds since stats is reset */ - u64 tx_frames; /* transmitted frames */ - u64 tx_words; /* transmitted words */ - u64 rx_frames; /* received frames */ - u64 rx_words; /* received words */ - u64 lip_count; /* LIPs seen */ - u64 nos_count; /* NOS count */ - u64 error_frames; /* errored frames */ - u64 dropped_frames; /* dropped frames */ - u64 link_failures; /* link failure count */ - u64 loss_of_syncs; /* loss of sync count */ - u64 loss_of_signals;/* loss of signal count */ - u64 primseq_errs; /* primitive sequence protocol */ - u64 bad_os_count; /* invalid ordered set */ - u64 err_enc_out; /* Encoding error outside frame */ - u64 invalid_crcs; /* frames received with invalid CRC*/ - u64 undersized_frm; /* undersized frames */ - u64 oversized_frm; /* oversized frames */ - u64 bad_eof_frm; /* frames with bad EOF */ - struct bfa_qos_stats_s qos_stats; /* QoS statistics */ + u64 secs_reset; /* Seconds since stats is reset */ + u64 tx_frames; /* Tx frames */ + u64 tx_words; /* Tx words */ + u64 tx_lip; /* TX LIP */ + u64 tx_nos; /* Tx NOS */ + u64 tx_ols; /* Tx OLS */ + u64 tx_lr; /* Tx LR */ + u64 tx_lrr; /* Tx LRR */ + u64 rx_frames; /* Rx frames */ + u64 rx_words; /* Rx words */ + u64 lip_count; /* Rx LIP */ + u64 nos_count; /* Rx NOS */ + u64 ols_count; /* Rx OLS */ + u64 lr_count; /* Rx LR */ + u64 lrr_count; /* Rx LRR */ + u64 invalid_crcs; /* Rx CRC err frames */ + u64 invalid_crc_gd_eof; /* Rx CRC err good EOF frames */ + u64 undersized_frm; /* Rx undersized frames */ + u64 oversized_frm; /* Rx oversized frames */ + u64 bad_eof_frm; /* Rx frames with bad EOF */ + u64 error_frames; /* Errored frames */ + u64 dropped_frames; /* Dropped frames */ + u64 link_failures; /* Link Failure (LF) count */ + u64 loss_of_syncs; /* Loss of sync count */ + u64 loss_of_signals;/* Loss of signal count */ + u64 primseq_errs; /* Primitive sequence protocol err. */ + u64 bad_os_count; /* Invalid ordered sets */ + u64 err_enc_out; /* Encoding err nonframe_8b10b */ + u64 err_enc; /* Encoding err frame_8b10b */ }; /** * Eth Port statistics. 
*/ struct bfa_pport_eth_stats_s { - u64 secs_reset; /* seconds since stats is reset */ - u64 frame_64; /* both rx and tx counter */ - u64 frame_65_127; /* both rx and tx counter */ - u64 frame_128_255; /* both rx and tx counter */ - u64 frame_256_511; /* both rx and tx counter */ - u64 frame_512_1023; /* both rx and tx counter */ - u64 frame_1024_1518; /* both rx and tx counter */ - u64 frame_1519_1522; /* both rx and tx counter */ - - u64 tx_bytes; - u64 tx_packets; - u64 tx_mcast_packets; - u64 tx_bcast_packets; - u64 tx_control_frame; - u64 tx_drop; - u64 tx_jabber; - u64 tx_fcs_error; - u64 tx_fragments; - - u64 rx_bytes; - u64 rx_packets; - u64 rx_mcast_packets; - u64 rx_bcast_packets; - u64 rx_control_frames; - u64 rx_unknown_opcode; - u64 rx_drop; - u64 rx_jabber; - u64 rx_fcs_error; - u64 rx_alignment_error; - u64 rx_frame_length_error; - u64 rx_code_error; - u64 rx_fragments; - - u64 rx_pause; /* BPC */ - u64 rx_zero_pause; /* BPC Pause cancellation */ - u64 tx_pause; /* BPC */ - u64 tx_zero_pause; /* BPC Pause cancellation */ - u64 rx_fcoe_pause; /* BPC */ - u64 rx_fcoe_zero_pause; /* BPC Pause cancellation */ - u64 tx_fcoe_pause; /* BPC */ - u64 tx_fcoe_zero_pause; /* BPC Pause cancellation */ + u64 secs_reset; /* Seconds since stats is reset */ + u64 frame_64; /* Frames 64 bytes */ + u64 frame_65_127; /* Frames 65-127 bytes */ + u64 frame_128_255; /* Frames 128-255 bytes */ + u64 frame_256_511; /* Frames 256-511 bytes */ + u64 frame_512_1023; /* Frames 512-1023 bytes */ + u64 frame_1024_1518; /* Frames 1024-1518 bytes */ + u64 frame_1519_1522; /* Frames 1519-1522 bytes */ + u64 tx_bytes; /* Tx bytes */ + u64 tx_packets; /* Tx packets */ + u64 tx_mcast_packets; /* Tx multicast packets */ + u64 tx_bcast_packets; /* Tx broadcast packets */ + u64 tx_control_frame; /* Tx control frame */ + u64 tx_drop; /* Tx drops */ + u64 tx_jabber; /* Tx jabber */ + u64 tx_fcs_error; /* Tx FCS error */ + u64 tx_fragments; /* Tx fragments */ + u64 rx_bytes; /* Rx bytes */ + u64 rx_packets; /* Rx packets */ + u64 rx_mcast_packets; /* Rx multicast packets */ + u64 rx_bcast_packets; /* Rx broadcast packets */ + u64 rx_control_frames; /* Rx control frames */ + u64 rx_unknown_opcode; /* Rx unknown opcode */ + u64 rx_drop; /* Rx drops */ + u64 rx_jabber; /* Rx jabber */ + u64 rx_fcs_error; /* Rx FCS errors */ + u64 rx_alignment_error; /* Rx alignment errors */ + u64 rx_frame_length_error; /* Rx frame len errors */ + u64 rx_code_error; /* Rx code errors */ + u64 rx_fragments; /* Rx fragments */ + u64 rx_pause; /* Rx pause */ + u64 rx_zero_pause; /* Rx zero pause */ + u64 tx_pause; /* Tx pause */ + u64 tx_zero_pause; /* Tx zero pause */ + u64 rx_fcoe_pause; /* Rx fcoe pause */ + u64 rx_fcoe_zero_pause; /* Rx FCoE zero pause */ + u64 tx_fcoe_pause; /* Tx FCoE pause */ + u64 tx_fcoe_zero_pause; /* Tx FCoE zero pause */ }; /** -- cgit v1.2.3 From 25e2934c26f5efaea156c9fda4457d01a8bb44e1 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:38:17 -0800 Subject: [SCSI] bfa: FCS and include file changes. MS module did not invoke fdmi offline in all cases, call fdmi offline when ms module receives a port offline, so that fdmi offline is from one place in the ms module. Make changes to handle 10G speed in the conversion routine. Replaced the usage of bfa_adapter_attr_s struct with specific API's. 
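The FDMI change described above amounts to notifying FDMI from the MS offline entry
point only, instead of from individual state handlers. A simplified sketch follows;
the symbol names match the hunks below, but ms_port_offline() is a hypothetical
helper and the MS instance lookup and state-machine details are omitted:

    /* Simplified sketch: a single place propagates port-offline to MS and FDMI. */
    static void ms_port_offline(struct bfa_fcs_port_ms_s *ms,
                                struct bfa_fcs_port_s *port)
    {
            ms->port = port;
            bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE); /* drive the MS state machine */
            bfa_fcs_port_fdmi_offline(ms);                  /* FDMI offline, one call site */
    }
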
Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/fabric.c | 8 +++--- drivers/scsi/bfa/fcbuild.h | 6 +++++ drivers/scsi/bfa/fdmi.c | 33 ++++++++----------------- drivers/scsi/bfa/include/defs/bfa_defs_status.h | 5 ++-- drivers/scsi/bfa/ms.c | 5 +--- 5 files changed, 22 insertions(+), 35 deletions(-) diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c index b4e05ad1b47e..8166e9745ec0 100644 --- a/drivers/scsi/bfa/fabric.c +++ b/drivers/scsi/bfa/fabric.c @@ -562,17 +562,15 @@ void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric) { struct bfa_port_cfg_s *port_cfg = &fabric->bport.port_cfg; - struct bfa_adapter_attr_s adapter_attr; + char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0}; struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info; - bfa_os_memset((void *)&adapter_attr, 0, - sizeof(struct bfa_adapter_attr_s)); - bfa_ioc_get_adapter_attr(&fabric->fcs->bfa->ioc, &adapter_attr); + bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); /* * Model name/number */ - strncpy((char *)&port_cfg->sym_name, adapter_attr.model, + strncpy((char *)&port_cfg->sym_name, model, BFA_FCS_PORT_SYMBNAME_MODEL_SZ); strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR)); diff --git a/drivers/scsi/bfa/fcbuild.h b/drivers/scsi/bfa/fcbuild.h index 8fa7f270ef7b..981d98d542b9 100644 --- a/drivers/scsi/bfa/fcbuild.h +++ b/drivers/scsi/bfa/fcbuild.h @@ -72,6 +72,9 @@ fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed_s speed) case RPSC_OP_SPEED_8G: return BFA_PPORT_SPEED_8GBPS; + case RPSC_OP_SPEED_10G: + return BFA_PPORT_SPEED_10GBPS; + default: return BFA_PPORT_SPEED_UNKNOWN; } @@ -97,6 +100,9 @@ fc_bfa_speed_to_rpsc_operspeed(enum bfa_pport_speed op_speed) case BFA_PPORT_SPEED_8GBPS: return RPSC_OP_SPEED_8G; + case BFA_PPORT_SPEED_10GBPS: + return RPSC_OP_SPEED_10G; + default: return RPSC_OP_SPEED_NOT_EST; } diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c index 2c9e7132a368..8f17076d1a87 100644 --- a/drivers/scsi/bfa/fdmi.c +++ b/drivers/scsi/bfa/fdmi.c @@ -1114,36 +1114,23 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_port_fdmi_s *fdmi, { struct bfa_fcs_port_s *port = fdmi->ms->port; struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; - struct bfa_adapter_attr_s adapter_attr; bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); - bfa_os_memset(&adapter_attr, 0, sizeof(struct bfa_adapter_attr_s)); - bfa_ioc_get_adapter_attr(&port->fcs->bfa->ioc, &adapter_attr); - - strncpy(hba_attr->manufacturer, adapter_attr.manufacturer, - sizeof(adapter_attr.manufacturer)); - - strncpy(hba_attr->serial_num, adapter_attr.serial_num, - sizeof(adapter_attr.serial_num)); - - strncpy(hba_attr->model, adapter_attr.model, sizeof(hba_attr->model)); - - strncpy(hba_attr->model_desc, adapter_attr.model_descr, - sizeof(hba_attr->model_desc)); - - strncpy(hba_attr->hw_version, adapter_attr.hw_ver, - sizeof(hba_attr->hw_version)); + bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc, + hba_attr->manufacturer); + bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc, + hba_attr->serial_num); + bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model); + bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model_desc); + bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc, hba_attr->hw_version); + bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc, + hba_attr->option_rom_ver); + bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc, hba_attr->fw_version); 
strncpy(hba_attr->driver_version, (char *)driver_info->version, sizeof(hba_attr->driver_version)); - strncpy(hba_attr->option_rom_ver, adapter_attr.optrom_ver, - sizeof(hba_attr->option_rom_ver)); - - strncpy(hba_attr->fw_version, adapter_attr.fw_ver, - sizeof(hba_attr->fw_version)); - strncpy(hba_attr->os_name, driver_info->host_os_name, sizeof(hba_attr->os_name)); diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_status.h b/drivers/scsi/bfa/include/defs/bfa_defs_status.h index d8a74ebfe1ae..4374494bd566 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_status.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_status.h @@ -213,7 +213,7 @@ enum bfa_status { * loaded */ BFA_STATUS_CARD_TYPE_MISMATCH = 131, /* Card type mismatch */ BFA_STATUS_BAD_ASICBLK = 132, /* Bad ASIC block */ - BFA_STATUS_NO_DRIVER = 133, /* Storage/Ethernet driver not loaded */ + BFA_STATUS_NO_DRIVER = 133, /* Brocade adapter/driver not installed or loaded */ BFA_STATUS_INVALID_MAC = 134, /* Invalid mac address */ BFA_STATUS_IM_NO_VLAN = 135, /* No VLANs configured on the adapter */ BFA_STATUS_IM_ETH_LB_FAILED = 136, /* Ethernet loopback test failed */ @@ -228,8 +228,7 @@ enum bfa_status { BFA_STATUS_IM_GET_INETCFG_FAILED = 142, /* Acquiring Network Subsytem * handle Failed. Please try * after some time */ - BFA_STATUS_IM_NOT_BOUND = 143, /* Brocade 10G Ethernet Service is not - * Enabled on this port */ + BFA_STATUS_IM_NOT_BOUND = 143, /* IM driver is not active */ BFA_STATUS_INSUFFICIENT_PERMS = 144, /* User doesn't have sufficient * permissions to execute the BCU * application */ diff --git a/drivers/scsi/bfa/ms.c b/drivers/scsi/bfa/ms.c index e6db5fd301f2..5e8c8dee6c97 100644 --- a/drivers/scsi/bfa/ms.c +++ b/drivers/scsi/bfa/ms.c @@ -230,10 +230,6 @@ bfa_fcs_port_ms_sm_online(struct bfa_fcs_port_ms_s *ms, switch (event) { case MSSM_EVENT_PORT_OFFLINE: bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline); - /* - * now invoke MS related sub-modules - */ - bfa_fcs_port_fdmi_offline(ms); break; case MSSM_EVENT_PORT_FABRIC_RSCN: @@ -735,6 +731,7 @@ bfa_fcs_port_ms_offline(struct bfa_fcs_port_s *port) ms->port = port; bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE); + bfa_fcs_port_fdmi_offline(ms); } void -- cgit v1.2.3 From 95aa060decd2472d319c3f12b0b1b699a5f35058 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:38:27 -0800 Subject: [SCSI] bfa: Handle SCSI IO underrun case. When IO is completed with underrun and with good SCSI status, check if the transferred bytes against scsi_cmnd->underflow, which is set to minimum number of bytes that must be transferred for this command, if is less than required minimum, complete the IO with DID_ERROR. 
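
In essence, the good-status completion path gains a check of this shape
(simplified sketch of the logic; the actual hunk follows below):

	u8 host_status = DID_OK;

	if (residue > 0) {
		scsi_set_resid(cmnd, residue);
		/* Underrun below the command's declared minimum is an error. */
		if (!sns_len && scsi_status == SAM_STAT_GOOD &&
		    (scsi_bufflen(cmnd) - residue) < cmnd->underflow)
			host_status = DID_ERROR;
	}
	cmnd->result = ScsiResult(host_status, scsi_status);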
Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfad_im.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index cee3d89d1412..fb7aefaba129 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -43,11 +43,11 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio, struct bfad_s *bfad = drv; struct bfad_itnim_data_s *itnim_data; struct bfad_itnim_s *itnim; + u8 host_status = DID_OK; switch (io_status) { case BFI_IOIM_STS_OK: bfa_trc(bfad, scsi_status); - cmnd->result = ScsiResult(DID_OK, scsi_status); scsi_set_resid(cmnd, 0); if (sns_len > 0) { @@ -56,8 +56,18 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio, sns_len = SCSI_SENSE_BUFFERSIZE; memcpy(cmnd->sense_buffer, sns_info, sns_len); } - if (residue > 0) + if (residue > 0) { + bfa_trc(bfad, residue); scsi_set_resid(cmnd, residue); + if (!sns_len && (scsi_status == SAM_STAT_GOOD) && + (scsi_bufflen(cmnd) - residue) < + cmnd->underflow) { + bfa_trc(bfad, 0); + host_status = DID_ERROR; + } + } + cmnd->result = ScsiResult(host_status, scsi_status); + break; case BFI_IOIM_STS_ABORTED: -- cgit v1.2.3 From d1c61f8ef582055569de76a86fa1984f9b6698cf Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:38:44 -0800 Subject: [SCSI] bfa: Remove unused header files and did some cleanup. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfa_fcport.c | 4 +- drivers/scsi/bfa/bfa_ioc_cb.c | 12 ++-- drivers/scsi/bfa/bfa_ioc_ct.c | 28 ++++---- drivers/scsi/bfa/bfad_attr.h | 9 --- drivers/scsi/bfa/bfad_drv.h | 10 --- drivers/scsi/bfa/bfad_im.c | 10 --- drivers/scsi/bfa/bfad_im.h | 5 -- drivers/scsi/bfa/fcpim.c | 1 - drivers/scsi/bfa/include/defs/bfa_defs_driver.h | 3 +- drivers/scsi/bfa/include/defs/bfa_defs_im_common.h | 32 --------- drivers/scsi/bfa/include/defs/bfa_defs_im_team.h | 72 --------------------- drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h | 1 - drivers/scsi/bfa/include/protocol/pcifw.h | 75 ---------------------- 13 files changed, 24 insertions(+), 238 deletions(-) delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_im_common.h delete mode 100644 drivers/scsi/bfa/include/defs/bfa_defs_im_team.h delete mode 100644 drivers/scsi/bfa/include/protocol/pcifw.h diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c index d109e651b1c5..c589488db0c1 100644 --- a/drivers/scsi/bfa/bfa_fcport.c +++ b/drivers/scsi/bfa/bfa_fcport.c @@ -853,9 +853,9 @@ bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, static void bfa_fcport_qresume(void *cbarg) { - struct bfa_fcport_s *port = cbarg; + struct bfa_fcport_s *fcport = cbarg; - bfa_sm_send_event(port, BFA_FCPORT_SM_QRESUME); + bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME); } static void diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c index 1fa052ef9ce0..3ce85319f739 100644 --- a/drivers/scsi/bfa/bfa_ioc_cb.c +++ b/drivers/scsi/bfa/bfa_ioc_cb.c @@ -63,13 +63,13 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc) ioc->ioc_hwif = &hwif_cb; } -static uint32_t * -bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, uint32_t off) +static u32 * +bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off) { return bfi_image_cb_get_chunk(off); } -static uint32_t +static u32 bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc) { return bfi_image_cb_size; @@ -102,7 +102,7 @@ bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc) /** * Host to LPU mailbox message addresses */ 
-static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { +static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 } }; @@ -110,7 +110,7 @@ static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { /** * Host <-> LPU mailbox command/status registers */ -static struct { uint32_t hfn, lpu; } iocreg_mbcmd[] = { +static struct { u32 hfn, lpu; } iocreg_mbcmd[] = { { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT } }; @@ -192,7 +192,7 @@ static bfa_status_t bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc) { bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; - uint32_t pll_sclk, pll_fclk; + u32 pll_sclk, pll_fclk; /* * Hold semaphore so that nobody can access the chip during init. diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c index 2431922c34a4..20b58ad5f95c 100644 --- a/drivers/scsi/bfa/bfa_ioc_ct.c +++ b/drivers/scsi/bfa/bfa_ioc_ct.c @@ -33,9 +33,9 @@ BFA_TRC_FILE(CNA, IOC_CT); static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc); static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc); -static uint32_t* bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc, - uint32_t off); -static uint32_t bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc); +static u32* bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc, + u32 off); +static u32 bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc); static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); @@ -64,13 +64,13 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc) ioc->ioc_hwif = &hwif_ct; } -static uint32_t* -bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc, uint32_t off) +static u32* +bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off) { return bfi_image_ct_get_chunk(off); } -static uint32_t +static u32 bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc) { return bfi_image_ct_size; @@ -83,7 +83,7 @@ static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) { enum bfi_ioc_state ioc_fwstate; - uint32_t usecnt; + u32 usecnt; struct bfi_ioc_image_hdr_s fwhdr; /** @@ -142,7 +142,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc) { - uint32_t usecnt; + u32 usecnt; /** * Firmware lock is relevant only for CNA. 
@@ -184,7 +184,7 @@ bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc) /** * Host to LPU mailbox message addresses */ -static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { +static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 }, @@ -194,7 +194,7 @@ static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { /** * Host <-> LPU mailbox command/status registers - port 0 */ -static struct { uint32_t hfn, lpu; } iocreg_mbcmd_p0[] = { +static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = { { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT }, { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT }, { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT }, @@ -204,7 +204,7 @@ static struct { uint32_t hfn, lpu; } iocreg_mbcmd_p0[] = { /** * Host <-> LPU mailbox command/status registers - port 1 */ -static struct { uint32_t hfn, lpu; } iocreg_mbcmd_p1[] = { +static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = { { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT }, { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT }, { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT }, @@ -274,7 +274,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc) { bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; - uint32_t r32; + u32 r32; /** * For catapult, base port id on personality register and IOC type @@ -294,7 +294,7 @@ static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) { bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; - uint32_t r32, mode; + u32 r32, mode; r32 = bfa_reg_read(rb + FNC_PERS_REG); bfa_trc(ioc, r32); @@ -324,7 +324,7 @@ static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc) { bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; - uint32_t pll_sclk, pll_fclk, r32; + u32 pll_sclk, pll_fclk, r32; /* * Hold semaphore so that nobody can access the chip during init. diff --git a/drivers/scsi/bfa/bfad_attr.h b/drivers/scsi/bfa/bfad_attr.h index 4d3312da6a81..bf0102076508 100644 --- a/drivers/scsi/bfa/bfad_attr.h +++ b/drivers/scsi/bfa/bfad_attr.h @@ -17,9 +17,6 @@ #ifndef __BFAD_ATTR_H__ #define __BFAD_ATTR_H__ -/** - * bfad_attr.h VMware driver configuration interface module. - */ /** * FC_transport_template FC transport template @@ -52,12 +49,6 @@ bfad_im_get_starget_port_name(struct scsi_target *starget); void bfad_im_get_host_port_id(struct Scsi_Host *shost); -/** - * FC transport template entry, issue a LIP. 
- */ -int -bfad_im_issue_fc_host_lip(struct Scsi_Host *shost); - struct Scsi_Host* bfad_os_starget_to_shost(struct scsi_target *starget); diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index 8617a1aa8b25..2ccd0f2ea6d1 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -196,11 +196,6 @@ struct bfad_s { bfa_boolean_t ipfc_enabled; union bfad_tmp_buf tmp_buf; struct fc_host_statistics link_stats; - - struct kobject *bfa_kobj; - struct kobject *ioc_kobj; - struct kobject *pport_kobj; - struct kobject *lport_kobj; }; /* @@ -286,11 +281,6 @@ void bfad_drv_uninit(struct bfad_s *bfad); void bfad_drv_log_level_set(struct bfad_s *bfad); bfa_status_t bfad_fc4_module_init(void); void bfad_fc4_module_exit(void); - -bfa_status_t bfad_os_kthread_create(struct bfad_s *bfad); -void bfad_os_kthread_stop(struct bfad_s *bfad); -void bfad_os_kthread_wakeup(struct bfad_s *bfad); -int bfad_os_kthread_should_stop(void); int bfad_worker (void *ptr); void bfad_pci_remove(struct pci_dev *pdev); diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index fb7aefaba129..f9fc67a25bf2 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -508,16 +508,6 @@ void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim) itnim->state = ITNIM_STATE_TIMEOUT; } -/** - * Path TOV processing begin notification -- dummy for linux - */ -void -bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim) -{ -} - - - /** * Allocate a Scsi_Host for a port. */ diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h index 189a5b29e21a..85ab2da21321 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h @@ -23,7 +23,6 @@ #define FCPI_NAME " fcpim" -void bfad_flags_set(struct bfad_s *bfad, u32 flags); bfa_status_t bfad_im_module_init(void); void bfad_im_module_exit(void); bfa_status_t bfad_im_probe(struct bfad_s *bfad); @@ -126,7 +125,6 @@ bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad); void bfad_os_destroy_workq(struct bfad_im_s *im); void bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv); void bfad_os_fc_host_init(struct bfad_im_port_s *im_port); -void bfad_os_init_work(struct bfad_im_port_s *im_port); void bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port); void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, @@ -136,9 +134,6 @@ struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id); int bfad_os_scsi_add_host(struct Scsi_Host *shost, struct bfad_im_port_s *im_port, struct bfad_s *bfad); -/* - * scsi_host_template entries - */ void bfad_im_itnim_unmap(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim); diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/fcpim.c index ef50ec2a1950..8ae4a2cfa85b 100644 --- a/drivers/scsi/bfa/fcpim.c +++ b/drivers/scsi/bfa/fcpim.c @@ -678,7 +678,6 @@ bfa_cb_itnim_tov_begin(void *cb_arg) struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg; bfa_trc(itnim->fcs, itnim->rport->pwwn); - bfa_fcb_itnim_tov_begin(itnim->itnim_drv); } /** diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h index 57049805762b..50382dd2ab41 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h @@ -21,6 +21,7 @@ /** * Driver statistics */ +struct bfa_driver_stats_s { u16 tm_io_abort; u16 tm_io_abort_comp; u16 tm_lun_reset; @@ -34,7 +35,7 @@ u64 output_req; u64 input_words; u64 output_words; -} bfa_driver_stats_t; +}; #endif /* 
__BFA_DEFS_DRIVER_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h b/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h deleted file mode 100644 index 9ccf53bef65a..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#ifndef __BFA_DEFS_IM_COMMON_H__ -#define __BFA_DEFS_IM_COMMON_H__ - -#define BFA_ADAPTER_NAME_LEN 256 -#define BFA_ADAPTER_GUID_LEN 256 -#define RESERVED_VLAN_NAME L"PORT VLAN" -#define PASSTHRU_VLAN_NAME L"PASSTHRU VLAN" - - u64 tx_pkt_cnt; - u64 rx_pkt_cnt; - u32 duration; - u8 status; -} bfa_im_stats_t, *pbfa_im_stats_t; - -#endif /* __BFA_DEFS_IM_COMMON_H__ */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h b/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h deleted file mode 100644 index a486a7eb81d6..000000000000 --- a/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -#ifndef __BFA_DEFS_IM_TEAM_H__ -#define __BFA_DEFS_IM_TEAM_H__ - -#include - -#define BFA_TEAM_MAX_PORTS 8 -#define BFA_TEAM_NAME_LEN 256 -#define BFA_MAX_NUM_TEAMS 16 -#define BFA_TEAM_INVALID_DELAY -1 - - BFA_LACP_RATE_SLOW = 1, - BFA_LACP_RATE_FAST -} bfa_im_lacp_rate_t; - - BFA_TEAM_MODE_FAIL_OVER = 1, - BFA_TEAM_MODE_FAIL_BACK, - BFA_TEAM_MODE_LACP, - BFA_TEAM_MODE_NONE -} bfa_im_team_mode_t; - - BFA_XMIT_POLICY_L2 = 1, - BFA_XMIT_POLICY_L3_L4 -} bfa_im_xmit_policy_t; - - bfa_im_team_mode_t team_mode; - bfa_im_lacp_rate_t lacp_rate; - bfa_im_xmit_policy_t xmit_policy; - int delay; - wchar_t primary[BFA_ADAPTER_NAME_LEN]; - wchar_t preferred_primary[BFA_ADAPTER_NAME_LEN]; - mac_t mac; - u16 num_ports; - u16 num_vlans; - u16 vlan_list[BFA_MAX_VLANS_PER_PORT]; - wchar_t team_guid_list[BFA_TEAM_MAX_PORTS][BFA_ADAPTER_GUID_LEN]; - wchar_t ioc_name_list[BFA_TEAM_MAX_PORTS][BFA_ADAPTER_NAME_LEN]; -} bfa_im_team_attr_t; - - wchar_t team_name[BFA_TEAM_NAME_LEN]; - bfa_im_xmit_policy_t xmit_policy; - int delay; - wchar_t primary[BFA_ADAPTER_NAME_LEN]; - wchar_t preferred_primary[BFA_ADAPTER_NAME_LEN]; -} bfa_im_team_edit_t, *pbfa_im_team_edit_t; - - wchar_t team_name[BFA_TEAM_NAME_LEN]; - bfa_im_team_mode_t team_mode; - mac_t mac; -} bfa_im_team_info_t; - - bfa_im_team_info_t team_info[BFA_MAX_NUM_TEAMS]; - u16 num_teams; -} bfa_im_team_list_t, *pbfa_im_team_list_t; - -#endif /* __BFA_DEFS_IM_TEAM_H__ */ diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h index a6c70aee0aa3..52585d3dd891 100644 --- a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h +++ b/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h @@ -70,7 +70,6 @@ void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv); */ void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv); -void bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim_drv); void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim_drv); #endif /* __BFAD_FCB_FCPIM_H__ */ diff --git a/drivers/scsi/bfa/include/protocol/pcifw.h b/drivers/scsi/bfa/include/protocol/pcifw.h deleted file mode 100644 index 6830dc3ee58a..000000000000 --- a/drivers/scsi/bfa/include/protocol/pcifw.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2005-2009 Brocade Communications Systems, Inc. - * All rights reserved - * www.brocade.com - * - * Linux driver for Brocade Fibre Channel Host Bus Adapter. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License (GPL) Version 2 as - * published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- */ - -/** - * pcifw.h PCI FW related headers - */ - -#ifndef __PCIFW_H__ -#define __PCIFW_H__ - -#pragma pack(1) - -struct pnp_hdr_s{ - u32 signature; /* "$PnP" */ - u8 rev; /* Struct revision */ - u8 len; /* Header structure len in multiples - * of 16 bytes */ - u16 off; /* Offset to next header 00 if none */ - u8 rsvd; /* Reserved byte */ - u8 cksum; /* 8-bit checksum for this header */ - u32 pnp_dev_id; /* PnP Device Id */ - u16 mfstr; /* Pointer to manufacturer string */ - u16 prstr; /* Pointer to product string */ - u8 devtype[3]; /* Device Type Code */ - u8 devind; /* Device Indicator */ - u16 bcventr; /* Bootstrap entry vector */ - u16 rsvd2; /* Reserved */ - u16 sriv; /* Static resource information vector */ -}; - -struct pci_3_0_ds_s{ - u32 sig; /* Signature "PCIR" */ - u16 vendid; /* Vendor ID */ - u16 devid; /* Device ID */ - u16 devlistoff; /* Device List Offset */ - u16 len; /* PCI Data Structure Length */ - u8 rev; /* PCI Data Structure Revision */ - u8 clcode[3]; /* Class Code */ - u16 imglen; /* Code image length in multiples of - * 512 bytes */ - u16 coderev; /* Revision level of code/data */ - u8 codetype; /* Code type 0x00 - BIOS */ - u8 indr; /* Last image indicator */ - u16 mrtimglen; /* Max Run Time Image Length */ - u16 cuoff; /* Config Utility Code Header Offset */ - u16 dmtfclp; /* DMTF CLP entry point offset */ -}; - -struct pci_optrom_hdr_s{ - u16 sig; /* Signature 0x55AA */ - u8 len; /* Option ROM length in units of 512 bytes */ - u8 inivec[3]; /* Initialization vector */ - u8 rsvd[16]; /* Reserved field */ - u16 verptr; /* Pointer to version string - private */ - u16 pcids; /* Pointer to PCI data structure */ - u16 pnphdr; /* Pointer to PnP expansion header */ -}; - -#pragma pack() - -#endif -- cgit v1.2.3 From d55f88f0275e4b21435957d3d354a79bb9edeec7 Mon Sep 17 00:00:00 2001 From: Krishna Gudipati Date: Fri, 5 Mar 2010 19:38:52 -0800 Subject: [SCSI] bfa: Update the driver version to 2.1.2.1. Upgrade the upstream driver from 2.0.0.0 to 2.1.2.1. Signed-off-by: Krishna Gudipati Signed-off-by: James Bottomley --- drivers/scsi/bfa/bfad_drv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index 2ccd0f2ea6d1..107848cd3b6d 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -46,7 +46,7 @@ #ifdef BFA_DRIVER_VERSION #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION #else -#define BFAD_DRIVER_VERSION "2.0.0.0" +#define BFAD_DRIVER_VERSION "2.1.2.1" #endif -- cgit v1.2.3 From a8941dad1f12b4e8a87a517ed27f29d0209c817c Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Mar 2010 13:33:17 +0900 Subject: sh: Support CPU affinity masks for INTC controllers. This hooks up the ->set_affinity() for the INTC controllers, which can be done as just a simple copy of the cpumask. The enable/disable paths already handle SMP register strides, so we just test the affinity mask in these paths to determine which strides to skip over. The early enable/disable path happens prior to the IRQs being registered, so we have no affinity mask established at that point, in which case we just default to CPU_MASK_ALL. This is left as it is to permit the force enable/disable code to retain existing semantics. 
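
Concretely, the per-CPU register walk in the enable and disable paths now
skips CPUs excluded by the descriptor's affinity mask (sketch of the idea
only; the real hunks follow):

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
	#ifdef CONFIG_SMP
		/* Skip CPUs that are not in this IRQ's affinity mask. */
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
			continue;
	#endif
		/* ... program the per-CPU enable register as before ... */
	}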
Signed-off-by: Paul Mundt --- drivers/sh/intc.c | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c index 3a5a17db9474..b8983fed76f5 100644 --- a/drivers/sh/intc.c +++ b/drivers/sh/intc.c @@ -2,7 +2,7 @@ * Shared interrupt handling code for IPR and INTC2 types of IRQs. * * Copyright (C) 2007, 2008 Magnus Damm - * Copyright (C) 2009 Paul Mundt + * Copyright (C) 2009, 2010 Paul Mundt * * Based on intc2.c and ipr.c * @@ -26,6 +26,7 @@ #include #include #include +#include #define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \ @@ -234,6 +235,10 @@ static inline void _intc_enable(unsigned int irq, unsigned long handle) unsigned int cpu; for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) { +#ifdef CONFIG_SMP + if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity)) + continue; +#endif addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu); intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\ [_INTC_FN(handle)], irq); @@ -253,6 +258,10 @@ static void intc_disable(unsigned int irq) unsigned int cpu; for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) { +#ifdef CONFIG_SMP + if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity)) + continue; +#endif addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu); intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\ [_INTC_FN(handle)], irq); @@ -301,6 +310,23 @@ static int intc_set_wake(unsigned int irq, unsigned int on) return 0; /* allow wakeup, but setup hardware in intc_suspend() */ } +#ifdef CONFIG_SMP +/* + * This is held with the irq desc lock held, so we don't require any + * additional locking here at the intc desc level. The affinity mask is + * later tested in the enable/disable paths. + */ +static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask) +{ + if (!cpumask_intersects(cpumask, cpu_online_mask)) + return -1; + + cpumask_copy(irq_to_desc(irq)->affinity, cpumask); + + return 0; +} +#endif + static void intc_mask_ack(unsigned int irq) { struct intc_desc_int *d = get_intc_desc(irq); @@ -843,6 +869,9 @@ void __init register_intc_controller(struct intc_desc *desc) d->chip.shutdown = intc_disable; d->chip.set_type = intc_set_sense; d->chip.set_wake = intc_set_wake; +#ifdef CONFIG_SMP + d->chip.set_affinity = intc_set_affinity; +#endif if (hw->ack_regs) { for (i = 0; i < hw->nr_ack_regs; i++) -- cgit v1.2.3 From 40d1f0048289b1baed859baee06878417fae540e Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 8 Mar 2010 21:03:21 +0900 Subject: sh: Fix up uncached offset for legacy 29-bit mode. The uncached_start was being set up properly for 32-bit but managed to break 29-bit in the process, fix it up. Signed-off-by: Paul Mundt --- arch/sh/mm/uncached.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c index cf20a5c5136a..2ef57efeb225 100644 --- a/arch/sh/mm/uncached.c +++ b/arch/sh/mm/uncached.c @@ -1,6 +1,7 @@ #include #include #include +#include /* * This is the offset of the uncached section from its cached alias. 
@@ -23,7 +24,11 @@ int virt_addr_uncached(unsigned long kaddr) void __init uncached_init(void) { +#ifdef CONFIG_29BIT + uncached_start = P2SEG; +#else uncached_start = memory_end; +#endif uncached_end = uncached_start + uncached_size; } -- cgit v1.2.3 From 0d9dc7c8b9b7fa0f53647423b41056ee1beed735 Mon Sep 17 00:00:00 2001 From: Gal Rosen Date: Thu, 21 Jan 2010 10:15:32 +0200 Subject: [SCSI] scsi_transport_fc: Fix synchronization issue while deleting vport The issue occur while deleting 60 virtual ports through the sys interface /sys/class/fc_vports/vport-X/vport_delete. It happen while in a mistake each request sent twice for the same vport. This interface is asynchronous, entering the delete request into a work queue, allowing more than one request to enter to the delete work queue. The result is a NULL pointer. The first request already delete the vport, while the second request got a pointer to the vport before the device destroyed. Re-create vport later cause system freeze. Solution: Check vport flags before entering the request to the work queue. [jejb: fixed int<->long problem on spinlock flags variable] Signed-off-by: Gal Rosen Acked-by: James Smart Cc: Stable Tree Signed-off-by: James Bottomley --- drivers/scsi/scsi_transport_fc.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 79660ee3e211..1d5b72173dd8 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -1232,6 +1232,15 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr, { struct fc_vport *vport = transport_class_to_vport(dev); struct Scsi_Host *shost = vport_to_shost(vport); + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) { + spin_unlock_irqrestore(shost->host_lock, flags); + return -EBUSY; + } + vport->flags |= FC_VPORT_DELETING; + spin_unlock_irqrestore(shost->host_lock, flags); fc_queue_work(shost, &vport->vport_delete_work); return count; @@ -1821,6 +1830,9 @@ store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr, list_for_each_entry(vport, &fc_host->vports, peers) { if ((vport->channel == 0) && (vport->port_name == wwpn) && (vport->node_name == wwnn)) { + if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) + break; + vport->flags |= FC_VPORT_DELETING; match = 1; break; } @@ -3370,18 +3382,6 @@ fc_vport_terminate(struct fc_vport *vport) unsigned long flags; int stat; - spin_lock_irqsave(shost->host_lock, flags); - if (vport->flags & FC_VPORT_CREATING) { - spin_unlock_irqrestore(shost->host_lock, flags); - return -EBUSY; - } - if (vport->flags & (FC_VPORT_DEL)) { - spin_unlock_irqrestore(shost->host_lock, flags); - return -EALREADY; - } - vport->flags |= FC_VPORT_DELETING; - spin_unlock_irqrestore(shost->host_lock, flags); - if (i->f->vport_delete) stat = i->f->vport_delete(vport); else -- cgit v1.2.3 From 5fe46e9d733f19a880ef7e516002bd4c2b833e14 Mon Sep 17 00:00:00 2001 From: Bian Naimeng Date: Mon, 8 Mar 2010 14:49:01 +0800 Subject: rpc client can not deal with ENOSOCK, so translate it into ENOCONN If NFSv4 client send a request before connect, or the old connection was broken because a ETIMEOUT error catched by call_status, ->send_request will return ENOSOCK, but rpc layer can not deal with it, so make sure ->send_request can translate ENOSOCK into ENOCONN. 
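
Put differently, ->send_request no longer bails out early when the socket
has gone away; the missing-socket status now reaches the existing switch,
whose -ENOTSOCK case (as elsewhere in xprtsock.c) reports a connection
error the RPC layer can handle. Simplified sketch of the resulting flow:

	/* The early "if (!transport->sock) goto out;" is removed, so a
	 * missing socket always falls through to the status switch. */
	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;	/* the RPC layer understands this one */
		break;
	/* ... other cases unchanged ... */
	}
	return status;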
Signed-off-by: Bian Naimeng Signed-off-by: Trond Myklebust --- net/sunrpc/xprtsock.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 712412982cee..abb8a589555f 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -549,8 +549,6 @@ static int xs_udp_send_request(struct rpc_task *task) /* Still some bytes left; set up for a retry later. */ status = -EAGAIN; } - if (!transport->sock) - goto out; switch (status) { case -ENOTSOCK: @@ -570,7 +568,7 @@ static int xs_udp_send_request(struct rpc_task *task) * prompts ECONNREFUSED. */ clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); } -out: + return status; } @@ -652,8 +650,6 @@ static int xs_tcp_send_request(struct rpc_task *task) status = -EAGAIN; break; } - if (!transport->sock) - goto out; switch (status) { case -ENOTSOCK: @@ -673,7 +669,7 @@ static int xs_tcp_send_request(struct rpc_task *task) case -ENOTCONN: clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); } -out: + return status; } -- cgit v1.2.3 From 7dd08a570dcf45d52155996fee688405635ee481 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 6 Mar 2010 15:02:22 +0300 Subject: nfs: fix unlikely memory leak I'll admit that it's unlikely for the first allocation to fail and the second one to succeed. I won't be offended if you ignore this patch. Signed-off-by: Dan Carpenter Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index eda74c42d552..f9254fb0c9d0 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5107,6 +5107,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, res = kzalloc(sizeof(*res), GFP_KERNEL); if (!args || !res) { kfree(args); + kfree(res); nfs_put_client(clp); return -ENOMEM; } -- cgit v1.2.3 From 49697ee79242d5f8ac88f1ebc62e583d16bcc687 Mon Sep 17 00:00:00 2001 From: Steve Dickson Date: Tue, 13 Oct 2009 16:07:33 -0400 Subject: nfs4: Make the v4 callback service hidden To avoid hangs in the svc_unregister(), on version 4 mounts (and unmounts), when rpcbind is not running, make the nfs4 callback program an 'hidden' service by setting the 'vs_hidden' flag in the nfs4_callback_version structure. Signed-off-by: Steve Dickson Signed-off-by: Trond Myklebust --- fs/nfs/callback_xdr.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index db30c0b398b5..a2b8b4df125d 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -782,6 +782,7 @@ struct svc_version nfs4_callback_version1 = { .vs_proc = nfs4_callback_procedures1, .vs_xdrsize = NFS4_CALLBACK_XDRSIZE, .vs_dispatch = NULL, + .vs_hidden = 1, }; struct svc_version nfs4_callback_version4 = { -- cgit v1.2.3 From 6bd1fbea2e8bff9346a5afb44ca948b4e4ed74a5 Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Wed, 9 Sep 2009 10:46:19 +0000 Subject: Blackfin: move on-chip UART resources to boards files Rather than keeping the pins in the actual driver and worrying about a mess of Kconfig options, declare all the desired pin resources in the boards file. This lets people easily select the specific pins/ports for the normal UART as well as GPIOs for CTS/RTS. 
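
The pattern repeated across the board files is a per-UART resource array
plus a zero-terminated pin list handed to the driver through
platform_data (sketch of the shape only; the concrete entries are in the
hunks below):

	static unsigned short bfin_uart0_peripherals[] = {
		P_UART0_TX, P_UART0_RX, 0	/* zero-terminated pin list */
	};

	static struct platform_device bfin_uart0_device = {
		.name		= "bfin-uart",
		.id		= 0,
		.num_resources	= ARRAY_SIZE(bfin_uart0_resources),
		.resource	= bfin_uart0_resources,
		.dev		= {
			/* consumed by the serial driver at probe time */
			.platform_data = &bfin_uart0_peripherals,
		},
	};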
Signed-off-by: Sonic Zhang Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf518/boards/ezbrd.c | 88 ++++++++++-- arch/blackfin/mach-bf527/boards/cm_bf527.c | 98 +++++++++++-- arch/blackfin/mach-bf527/boards/ezbrd.c | 98 +++++++++++-- arch/blackfin/mach-bf527/boards/ezkit.c | 98 +++++++++++-- arch/blackfin/mach-bf533/boards/H8606.c | 47 ++++-- arch/blackfin/mach-bf533/boards/blackstamp.c | 47 ++++-- arch/blackfin/mach-bf533/boards/cm_bf533.c | 47 ++++-- arch/blackfin/mach-bf533/boards/ezkit.c | 47 ++++-- arch/blackfin/mach-bf533/boards/ip0x.c | 48 +++++-- arch/blackfin/mach-bf533/boards/stamp.c | 47 ++++-- arch/blackfin/mach-bf537/boards/cm_bf537e.c | 22 ++- arch/blackfin/mach-bf537/boards/cm_bf537u.c | 93 ++++++++++-- arch/blackfin/mach-bf537/boards/minotaur.c | 93 ++++++++++-- arch/blackfin/mach-bf537/boards/pnav10.c | 93 ++++++++++-- arch/blackfin/mach-bf537/boards/stamp.c | 100 +++++++++++-- arch/blackfin/mach-bf537/boards/tcm_bf537.c | 93 ++++++++++-- arch/blackfin/mach-bf538/boards/ezkit.c | 145 +++++++++++++++++-- arch/blackfin/mach-bf548/boards/cm_bf548.c | 206 +++++++++++++++++++++++++-- arch/blackfin/mach-bf548/boards/ezkit.c | 206 +++++++++++++++++++++++++-- arch/blackfin/mach-bf561/boards/cm_bf561.c | 47 ++++-- arch/blackfin/mach-bf561/boards/ezkit.c | 47 ++++-- arch/blackfin/mach-bf561/boards/tepla.c | 53 +++++++ 22 files changed, 1667 insertions(+), 196 deletions(-) diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c index 01975c017116..0d1b021ffa69 100644 --- a/arch/blackfin/mach-bf518/boards/ezbrd.c +++ b/arch/blackfin/mach-bf518/boards/ezbrd.c @@ -382,30 +382,93 @@ static struct platform_device bfin_spi1_device = { #endif /* spi master and devices */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { #ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = UART0_THR, + .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 +}; + +static struct platform_device bfin_uart0_device = { + .name = "bfin-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, +}; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 +static struct resource bfin_uart1_resources[] = { { - .start = 0xFFC02000, - .end = 0xFFC020FF, + .start = UART1_THR, + .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, -#endif + { + .start = IRQ_UART1_RX, + .end = IRQ_UART1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART1_ERROR, + .end = IRQ_UART1_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART1_TX, + .end = CH_UART1_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART1_RX, + .end = CH_UART1_RX, + .flags = IORESOURCE_DMA, + }, }; -static struct platform_device bfin_uart_device = { +unsigned short bfin_uart1_peripherals[] = { + P_UART1_TX, P_UART1_RX, 0 +}; + +static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, - .num_resources = 
ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .num_resources = ARRAY_SIZE(bfin_uart1_resources), + .resource = bfin_uart1_resources, + .dev = { + .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -593,7 +656,12 @@ static struct platform_device *stamp_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c index 7ab0800e2914..2d93c1f520a2 100644 --- a/arch/blackfin/mach-bf527/boards/cm_bf527.c +++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c @@ -603,30 +603,105 @@ static struct platform_device cm_flash_device = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { #ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = UART0_THR, + .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 +}; + +static struct platform_device bfin_uart0_device = { + .name = "bfin-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, +}; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 +static struct resource bfin_uart1_resources[] = { { - .start = 0xFFC02000, - .end = 0xFFC020FF, + .start = UART1_THR, + .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART1_RX, + .end = IRQ_UART1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART1_ERROR, + .end = IRQ_UART1_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART1_TX, + .end = CH_UART1_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART1_RX, + .end = CH_UART1_RX, + .flags = IORESOURCE_DMA, + }, +#ifdef CONFIG_BFIN_UART1_CTSRTS + { /* CTS pin */ + .start = GPIO_PF9, + .end = GPIO_PF9, + .flags = IORESOURCE_IO, + }, + { /* RTS pin */ + .start = GPIO_PF10, + .end = GPIO_PF10, + .flags = IORESOURCE_IO, + }, #endif }; -static struct platform_device bfin_uart_device = { +unsigned short bfin_uart1_peripherals[] = { + P_UART1_TX, P_UART1_RX, 0 +}; + +static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .num_resources = ARRAY_SIZE(bfin_uart1_resources), + .resource = bfin_uart1_resources, + .dev = { + .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -829,7 +904,12 @@ static struct platform_device *cmbf527_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || 
defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c index cad23b15d83c..3a981e855668 100644 --- a/arch/blackfin/mach-bf527/boards/ezbrd.c +++ b/arch/blackfin/mach-bf527/boards/ezbrd.c @@ -439,30 +439,105 @@ static struct platform_device bfin_spi0_device = { #endif /* spi master and devices */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { #ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = UART0_THR, + .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 +}; + +static struct platform_device bfin_uart0_device = { + .name = "bfin-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, +}; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 +static struct resource bfin_uart1_resources[] = { { - .start = 0xFFC02000, - .end = 0xFFC020FF, + .start = UART1_THR, + .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART1_RX, + .end = IRQ_UART1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART1_ERROR, + .end = IRQ_UART1_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART1_TX, + .end = CH_UART1_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART1_RX, + .end = CH_UART1_RX, + .flags = IORESOURCE_DMA, + }, +#ifdef CONFIG_BFIN_UART1_CTSRTS + { /* CTS pin */ + .start = GPIO_PG0, + .end = GPIO_PG0, + .flags = IORESOURCE_IO, + }, + { /* RTS pin */ + .start = GPIO_PF10, + .end = GPIO_PF10, + .flags = IORESOURCE_IO, + }, #endif }; -static struct platform_device bfin_uart_device = { +unsigned short bfin_uart1_peripherals[] = { + P_UART1_TX, P_UART1_RX, 0 +}; + +static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .num_resources = ARRAY_SIZE(bfin_uart1_resources), + .resource = bfin_uart1_resources, + .dev = { + .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -665,7 +740,12 @@ static struct platform_device *stamp_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif #endif #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index 5294fdd20732..0750e655fb22 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ 
b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -647,30 +647,105 @@ static struct platform_device bfin_spi0_device = { #endif /* spi master and devices */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { #ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = UART0_THR, + .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 +}; + +static struct platform_device bfin_uart0_device = { + .name = "bfin-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, +}; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 +static struct resource bfin_uart1_resources[] = { { - .start = 0xFFC02000, - .end = 0xFFC020FF, + .start = UART1_THR, + .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART1_RX, + .end = IRQ_UART1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART1_ERROR, + .end = IRQ_UART1_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART1_TX, + .end = CH_UART1_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART1_RX, + .end = CH_UART1_RX, + .flags = IORESOURCE_DMA, + }, +#ifdef CONFIG_BFIN_UART1_CTSRTS + { /* CTS pin */ + .start = GPIO_PF9, + .end = GPIO_PF9, + .flags = IORESOURCE_IO, + }, + { /* RTS pin */ + .start = GPIO_PF10, + .end = GPIO_PF10, + .flags = IORESOURCE_IO, + }, #endif }; -static struct platform_device bfin_uart_device = { +unsigned short bfin_uart1_peripherals[] = { + P_UART1_TX, P_UART1_RX, 0 +}; + +static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .num_resources = ARRAY_SIZE(bfin_uart1_resources), + .resource = bfin_uart1_resources, + .dev = { + .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -910,7 +985,12 @@ static struct platform_device *stamp_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c index 4adceb0bdb6d..cb3b74f32346 100644 --- a/arch/blackfin/mach-bf533/boards/H8606.c +++ b/arch/blackfin/mach-bf533/boards/H8606.c @@ -257,21 +257,50 @@ static struct platform_device bfin_spi0_device = { #endif /* spi master and devices */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { +#ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = BFIN_UART_THR, + .end = BFIN_UART_GCTL+2, .flags = IORESOURCE_MEM, 
}, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX + 1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 }; -static struct platform_device bfin_uart_device = { +static struct platform_device bfin_uart0_device = { .name = "bfin-uart", - .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -394,7 +423,9 @@ static struct platform_device *h8606_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif #endif #if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE) diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c index b580884848d4..50ce3ecfdcb2 100644 --- a/arch/blackfin/mach-bf533/boards/blackstamp.c +++ b/arch/blackfin/mach-bf533/boards/blackstamp.c @@ -195,21 +195,50 @@ static struct platform_device bfin_spi0_device = { #endif /* spi master and devices */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { +#ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = BFIN_UART_THR, + .end = BFIN_UART_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX + 1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 }; -static struct platform_device bfin_uart_device = { +static struct platform_device bfin_uart0_device = { .name = "bfin-uart", - .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -344,7 +373,9 @@ static struct platform_device *stamp_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c index 7fc3b860d4ae..f707faf8a864 100644 --- a/arch/blackfin/mach-bf533/boards/cm_bf533.c +++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c @@ -242,21 +242,50 @@ static struct platform_device smsc911x_device = { #endif #if defined(CONFIG_SERIAL_BFIN) || 
defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { +#ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = BFIN_UART_THR, + .end = BFIN_UART_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX + 1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 }; -static struct platform_device bfin_uart_device = { +static struct platform_device bfin_uart0_device = { .name = "bfin-uart", - .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -432,7 +461,9 @@ static struct platform_device *cm_bf533_devices[] __initdata = { &bfin_dpmc, #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c index d4689dcc198e..50392476722a 100644 --- a/arch/blackfin/mach-bf533/boards/ezkit.c +++ b/arch/blackfin/mach-bf533/boards/ezkit.c @@ -320,21 +320,50 @@ static struct platform_device bfin_spi0_device = { #endif /* spi master and devices */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { +#ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = BFIN_UART_THR, + .end = BFIN_UART_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX + 1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 }; -static struct platform_device bfin_uart_device = { +static struct platform_device bfin_uart0_device = { .name = "bfin-uart", - .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -471,7 +500,9 @@ static struct platform_device *ezkit_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c 
b/arch/blackfin/mach-bf533/boards/ip0x.c index 8ec42ba35b9e..a60d0369ceb7 100644 --- a/arch/blackfin/mach-bf533/boards/ip0x.c +++ b/arch/blackfin/mach-bf533/boards/ip0x.c @@ -19,6 +19,7 @@ #include #endif #include +#include #include #include @@ -143,21 +144,50 @@ static struct platform_device spi_bfin_master_device = { #endif /* spi master and devices */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { +#ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = BFIN_UART_THR, + .end = BFIN_UART_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX + 1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 }; -static struct platform_device bfin_uart_device = { +static struct platform_device bfin_uart0_device = { .name = "bfin-uart", - .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -241,7 +271,9 @@ static struct platform_device *ip0x_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c index 6d68dcfa2da2..2683e5f2294f 100644 --- a/arch/blackfin/mach-bf533/boards/stamp.c +++ b/arch/blackfin/mach-bf533/boards/stamp.c @@ -322,21 +322,50 @@ static struct platform_device bfin_spi0_device = { #endif /* spi master and devices */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { +#ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = BFIN_UART_THR, + .end = BFIN_UART_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX + 1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 }; -static struct platform_device bfin_uart_device = { +static struct platform_device bfin_uart0_device = { .name = "bfin-uart", - .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -495,7 
+524,9 @@ static struct platform_device *stamp_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c index c85f4d770535..abb32ddc5122 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c @@ -329,8 +329,8 @@ static struct platform_device cm_flash_device = { #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = UART0_THR, + .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, { @@ -373,18 +373,25 @@ static struct resource bfin_uart0_resources[] = { #endif }; +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 +}; + static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, }; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 static struct resource bfin_uart1_resources[] = { { - .start = 0xFFC02000, - .end = 0xFFC020FF, + .start = UART1_THR, + .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, { @@ -427,11 +434,18 @@ static struct resource bfin_uart1_resources[] = { #endif }; +unsigned short bfin_uart1_peripherals[] = { + P_UART1_TX, P_UART1_RX, 0 +}; + static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_uart1_resources), .resource = bfin_uart1_resources, + .dev = { + .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ + }, }; #endif #endif diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c index ea11aa81340d..b3182face957 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c @@ -327,25 +327,93 @@ static struct platform_device cm_flash_device = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { +#ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = UART0_THR, + .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, - }, { - .start = 0xFFC02000, - .end = 0xFFC020FF, + }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 +}; + +static struct platform_device bfin_uart0_device = { + .name = "bfin-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 +static struct resource bfin_uart1_resources[] = { + { + .start = UART1_THR, + .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART1_RX, + .end = IRQ_UART1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART1_ERROR, + .end = 
IRQ_UART1_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART1_TX, + .end = CH_UART1_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART1_RX, + .end = CH_UART1_RX, + .flags = IORESOURCE_DMA, + }, }; -static struct platform_device bfin_uart_device = { +unsigned short bfin_uart1_peripherals[] = { + P_UART1_TX, P_UART1_RX, 0 +}; + +static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .num_resources = ARRAY_SIZE(bfin_uart1_resources), + .resource = bfin_uart1_resources, + .dev = { + .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -522,7 +590,12 @@ static struct platform_device *cm_bf537u_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c index 0da927252701..58d6a20e7a0d 100644 --- a/arch/blackfin/mach-bf537/boards/minotaur.c +++ b/arch/blackfin/mach-bf537/boards/minotaur.c @@ -211,25 +211,93 @@ static struct platform_device bfin_spi0_device = { #endif /* spi master and devices */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { +#ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = UART0_THR, + .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, - }, { - .start = 0xFFC02000, - .end = 0xFFC020FF, + }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 +}; + +static struct platform_device bfin_uart0_device = { + .name = "bfin-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 +static struct resource bfin_uart1_resources[] = { + { + .start = UART1_THR, + .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART1_RX, + .end = IRQ_UART1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART1_ERROR, + .end = IRQ_UART1_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART1_TX, + .end = CH_UART1_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART1_RX, + .end = CH_UART1_RX, + .flags = IORESOURCE_DMA, + }, }; -static struct platform_device bfin_uart_device = { +unsigned short bfin_uart1_peripherals[] = { + P_UART1_TX, P_UART1_RX, 0 +}; + +static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .num_resources = ARRAY_SIZE(bfin_uart1_resources), + .resource = bfin_uart1_resources, + .dev = { + .platform_data = &bfin_uart1_peripherals, /* 
Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -343,7 +411,12 @@ static struct platform_device *minotaur_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c index 4e0afda472ab..3c36abdf3405 100644 --- a/arch/blackfin/mach-bf537/boards/pnav10.c +++ b/arch/blackfin/mach-bf537/boards/pnav10.c @@ -362,25 +362,93 @@ static struct platform_device bfin_fb_device = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { +#ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = UART0_THR, + .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, - }, { - .start = 0xFFC02000, - .end = 0xFFC020FF, + }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 +}; + +static struct platform_device bfin_uart0_device = { + .name = "bfin-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 +static struct resource bfin_uart1_resources[] = { + { + .start = UART1_THR, + .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART1_RX, + .end = IRQ_UART1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART1_ERROR, + .end = IRQ_UART1_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART1_TX, + .end = CH_UART1_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART1_RX, + .end = CH_UART1_RX, + .flags = IORESOURCE_DMA, + }, }; -static struct platform_device bfin_uart_device = { +unsigned short bfin_uart1_peripherals[] = { + P_UART1_TX, P_UART1_RX, 0 +}; + +static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .num_resources = ARRAY_SIZE(bfin_uart1_resources), + .resource = bfin_uart1_resources, + .dev = { + .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -472,7 +540,12 @@ static struct platform_device *stamp_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index ac9b52e0087c..9842288c4f1c 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ 
b/arch/blackfin/mach-bf537/boards/stamp.c @@ -1157,30 +1157,105 @@ static struct platform_device bfin_lq035q1_device = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { #ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = UART0_THR, + .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +#ifdef CONFIG_BFIN_UART0_CTSRTS + { /* CTS pin */ + .start = GPIO_PG7, + .end = GPIO_PG7, + .flags = IORESOURCE_IO, + }, + { /* RTS pin */ + .start = GPIO_PG6, + .end = GPIO_PG6, + .flags = IORESOURCE_IO, + }, +#endif +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 +}; + +static struct platform_device bfin_uart0_device = { + .name = "bfin-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, +}; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 +static struct resource bfin_uart1_resources[] = { { - .start = 0xFFC02000, - .end = 0xFFC020FF, + .start = UART1_THR, + .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, -#endif + { + .start = IRQ_UART1_RX, + .end = IRQ_UART1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART1_ERROR, + .end = IRQ_UART1_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART1_TX, + .end = CH_UART1_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART1_RX, + .end = CH_UART1_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart1_peripherals[] = { + P_UART1_TX, P_UART1_RX, 0 }; -static struct platform_device bfin_uart_device = { +static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .num_resources = ARRAY_SIZE(bfin_uart1_resources), + .resource = bfin_uart1_resources, + .dev = { + .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -1771,7 +1846,12 @@ static struct platform_device *stamp_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c index 57163b65a4f5..d77ca5b75685 100644 --- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c +++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c @@ -327,25 +327,93 @@ static struct platform_device cm_flash_device = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { +#ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = UART0_THR, + .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, - }, { - .start = 0xFFC02000, - .end 
= 0xFFC020FF, + }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 +}; + +static struct platform_device bfin_uart0_device = { + .name = "bfin-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 +static struct resource bfin_uart1_resources[] = { + { + .start = UART1_THR, + .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART1_RX, + .end = IRQ_UART1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART1_ERROR, + .end = IRQ_UART1_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART1_TX, + .end = CH_UART1_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART1_RX, + .end = CH_UART1_RX, + .flags = IORESOURCE_DMA, + }, }; -static struct platform_device bfin_uart_device = { +unsigned short bfin_uart1_peripherals[] = { + P_UART1_TX, P_UART1_RX, 0 +}; + +static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .num_resources = ARRAY_SIZE(bfin_uart1_resources), + .resource = bfin_uart1_resources, + .dev = { + .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -524,7 +592,12 @@ static struct platform_device *cm_bf537_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c index c296bb1ed503..1912bc4555aa 100644 --- a/arch/blackfin/mach-bf538/boards/ezkit.c +++ b/arch/blackfin/mach-bf538/boards/ezkit.c @@ -41,37 +41,148 @@ static struct platform_device rtc_device = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { #ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = UART0_THR, + .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +#ifdef CONFIG_BFIN_UART0_CTSRTS + { /* CTS pin */ + .start = GPIO_PG7, + .end = GPIO_PG7, + .flags = IORESOURCE_IO, + }, + { /* RTS pin */ + .start = GPIO_PG6, + .end = GPIO_PG6, + .flags = IORESOURCE_IO, + }, +#endif +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 +}; + +static struct platform_device bfin_uart0_device = { + .name = "bfin-uart", + .id = 0, + .num_resources = 
ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, +}; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 +static struct resource bfin_uart1_resources[] = { { - .start = 0xFFC02000, - .end = 0xFFC020FF, + .start = UART1_THR, + .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART1_RX, + .end = IRQ_UART1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART1_ERROR, + .end = IRQ_UART1_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART1_TX, + .end = CH_UART1_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART1_RX, + .end = CH_UART1_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart1_peripherals[] = { + P_UART1_TX, P_UART1_RX, 0 +}; + +static struct platform_device bfin_uart1_device = { + .name = "bfin-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_uart1_resources), + .resource = bfin_uart1_resources, + .dev = { + .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ + }, +}; #endif #ifdef CONFIG_SERIAL_BFIN_UART2 +static struct resource bfin_uart2_resources[] = { { - .start = 0xFFC02100, - .end = 0xFFC021FF, + .start = UART2_THR, + .end = UART2_GCTL+2, .flags = IORESOURCE_MEM, }, -#endif + { + .start = IRQ_UART2_RX, + .end = IRQ_UART2_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART2_ERROR, + .end = IRQ_UART2_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART2_TX, + .end = CH_UART2_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART2_RX, + .end = CH_UART2_RX, + .flags = IORESOURCE_DMA, + }, }; -static struct platform_device bfin_uart_device = { +unsigned short bfin_uart2_peripherals[] = { + P_UART2_TX, P_UART2_RX, 0 +}; + +static struct platform_device bfin_uart2_device = { .name = "bfin-uart", - .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .id = 2, + .num_resources = ARRAY_SIZE(bfin_uart2_resources), + .resource = bfin_uart2_resources, + .dev = { + .platform_data = &bfin_uart2_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -622,7 +733,15 @@ static struct platform_device *cm_bf538_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART2 + &bfin_uart2_device, +#endif #endif #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c index ccdcd6da2e9f..635f92723e12 100644 --- a/arch/blackfin/mach-bf548/boards/cm_bf548.c +++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c @@ -127,44 +127,211 @@ static struct platform_device rtc_device = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { #ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = UART0_DLL, + .end = UART0_RBR+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + 
.start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 +}; + +static struct platform_device bfin_uart0_device = { + .name = "bfin-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, +}; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 +static struct resource bfin_uart1_resources[] = { { - .start = 0xFFC02000, - .end = 0xFFC020FF, + .start = UART1_DLL, + .end = UART1_RBR+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART1_RX, + .end = IRQ_UART1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART1_ERROR, + .end = IRQ_UART1_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART1_TX, + .end = CH_UART1_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART1_RX, + .end = CH_UART1_RX, + .flags = IORESOURCE_DMA, + }, +#ifdef CONFIG_BFIN_UART1_CTSRTS + { /* CTS pin -- 0 means not supported */ + .start = GPIO_PE10, + .end = GPIO_PE10, + .flags = IORESOURCE_IO, + }, + { /* RTS pin -- 0 means not supported */ + .start = GPIO_PE9, + .end = GPIO_PE9, + .flags = IORESOURCE_IO, + }, +#endif +}; + +unsigned short bfin_uart1_peripherals[] = { + P_UART1_TX, P_UART1_RX, +#ifdef CONFIG_BFIN_UART1_CTSRTS + P_UART1_RTS, P_UART1_CTS, +#endif + 0 +}; + +static struct platform_device bfin_uart1_device = { + .name = "bfin-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_uart1_resources), + .resource = bfin_uart1_resources, + .dev = { + .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ + }, +}; #endif #ifdef CONFIG_SERIAL_BFIN_UART2 +static struct resource bfin_uart2_resources[] = { { - .start = 0xFFC02100, - .end = 0xFFC021FF, + .start = UART2_DLL, + .end = UART2_RBR+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART2_RX, + .end = IRQ_UART2_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART2_ERROR, + .end = IRQ_UART2_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART2_TX, + .end = CH_UART2_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART2_RX, + .end = CH_UART2_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart2_peripherals[] = { + P_UART2_TX, P_UART2_RX, 0 +}; + +static struct platform_device bfin_uart2_device = { + .name = "bfin-uart", + .id = 2, + .num_resources = ARRAY_SIZE(bfin_uart2_resources), + .resource = bfin_uart2_resources, + .dev = { + .platform_data = &bfin_uart2_peripherals, /* Passed to driver */ + }, +}; #endif #ifdef CONFIG_SERIAL_BFIN_UART3 +static struct resource bfin_uart3_resources[] = { { - .start = 0xFFC03100, - .end = 0xFFC031FF, + .start = UART3_DLL, + .end = UART3_RBR+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART3_RX, + .end = IRQ_UART3_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART3_ERROR, + .end = IRQ_UART3_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART3_TX, + .end = CH_UART3_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART3_RX, + .end = CH_UART3_RX, + .flags = IORESOURCE_DMA, + }, +#ifdef CONFIG_BFIN_UART3_CTSRTS + { /* CTS pin -- 0 means not supported */ + .start = GPIO_PB3, + .end = GPIO_PB3, + .flags = IORESOURCE_IO, + }, + { /* RTS pin -- 0 means not supported */ + .start = GPIO_PB2, + .end = GPIO_PB2, + .flags = IORESOURCE_IO, + }, #endif }; -static struct platform_device bfin_uart_device = { +unsigned short bfin_uart3_peripherals[] = { + P_UART3_TX, P_UART3_RX, +#ifdef CONFIG_BFIN_UART3_CTSRTS + P_UART3_RTS, 
P_UART3_CTS, +#endif + 0 +}; + +static struct platform_device bfin_uart3_device = { .name = "bfin-uart", - .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .id = 3, + .num_resources = ARRAY_SIZE(bfin_uart3_resources), + .resource = bfin_uart3_resources, + .dev = { + .platform_data = &bfin_uart3_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -752,7 +919,18 @@ static struct platform_device *cm_bf548_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART2 + &bfin_uart2_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART3 + &bfin_uart3_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c index 60193f72777c..1ed026157737 100644 --- a/arch/blackfin/mach-bf548/boards/ezkit.c +++ b/arch/blackfin/mach-bf548/boards/ezkit.c @@ -232,44 +232,211 @@ static struct platform_device rtc_device = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { #ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = UART0_DLL, + .end = UART0_RBR+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 +}; + +static struct platform_device bfin_uart0_device = { + .name = "bfin-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, +}; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 +static struct resource bfin_uart1_resources[] = { { - .start = 0xFFC02000, - .end = 0xFFC020FF, + .start = UART1_DLL, + .end = UART1_RBR+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART1_RX, + .end = IRQ_UART1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART1_ERROR, + .end = IRQ_UART1_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART1_TX, + .end = CH_UART1_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART1_RX, + .end = CH_UART1_RX, + .flags = IORESOURCE_DMA, + }, +#ifdef CONFIG_BFIN_UART1_CTSRTS + { /* CTS pin -- 0 means not supported */ + .start = GPIO_PE10, + .end = GPIO_PE10, + .flags = IORESOURCE_IO, + }, + { /* RTS pin -- 0 means not supported */ + .start = GPIO_PE9, + .end = GPIO_PE9, + .flags = IORESOURCE_IO, + }, +#endif +}; + +unsigned short bfin_uart1_peripherals[] = { + P_UART1_TX, P_UART1_RX, +#ifdef CONFIG_BFIN_UART1_CTSRTS + P_UART1_RTS, P_UART1_CTS, +#endif + 0 +}; + +static struct platform_device bfin_uart1_device = { + .name = "bfin-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_uart1_resources), + .resource = bfin_uart1_resources, + .dev = { + .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ + }, +}; #endif 
#ifdef CONFIG_SERIAL_BFIN_UART2 +static struct resource bfin_uart2_resources[] = { { - .start = 0xFFC02100, - .end = 0xFFC021FF, + .start = UART2_DLL, + .end = UART2_RBR+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART2_RX, + .end = IRQ_UART2_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART2_ERROR, + .end = IRQ_UART2_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART2_TX, + .end = CH_UART2_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART2_RX, + .end = CH_UART2_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart2_peripherals[] = { + P_UART2_TX, P_UART2_RX, 0 +}; + +static struct platform_device bfin_uart2_device = { + .name = "bfin-uart", + .id = 2, + .num_resources = ARRAY_SIZE(bfin_uart2_resources), + .resource = bfin_uart2_resources, + .dev = { + .platform_data = &bfin_uart2_peripherals, /* Passed to driver */ + }, +}; #endif #ifdef CONFIG_SERIAL_BFIN_UART3 +static struct resource bfin_uart3_resources[] = { { - .start = 0xFFC03100, - .end = 0xFFC031FF, + .start = UART3_DLL, + .end = UART3_RBR+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART3_RX, + .end = IRQ_UART3_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART3_ERROR, + .end = IRQ_UART3_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART3_TX, + .end = CH_UART3_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART3_RX, + .end = CH_UART3_RX, + .flags = IORESOURCE_DMA, + }, +#ifdef CONFIG_BFIN_UART3_CTSRTS + { /* CTS pin -- 0 means not supported */ + .start = GPIO_PB3, + .end = GPIO_PB3, + .flags = IORESOURCE_IO, + }, + { /* RTS pin -- 0 means not supported */ + .start = GPIO_PB2, + .end = GPIO_PB2, + .flags = IORESOURCE_IO, + }, #endif }; -static struct platform_device bfin_uart_device = { +unsigned short bfin_uart3_peripherals[] = { + P_UART3_TX, P_UART3_RX, +#ifdef CONFIG_BFIN_UART3_CTSRTS + P_UART3_RTS, P_UART3_CTS, +#endif + 0 +}; + +static struct platform_device bfin_uart3_device = { .name = "bfin-uart", - .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .id = 3, + .num_resources = ARRAY_SIZE(bfin_uart3_resources), + .resource = bfin_uart3_resources, + .dev = { + .platform_data = &bfin_uart3_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -960,7 +1127,18 @@ static struct platform_device *ezkit_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART2 + &bfin_uart2_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART3 + &bfin_uart3_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c index dfc8d5b77986..818266f6348f 100644 --- a/arch/blackfin/mach-bf561/boards/cm_bf561.c +++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c @@ -305,21 +305,50 @@ static struct platform_device isp1362_hcd_device = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { +#ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = BFIN_UART_THR, + .end = BFIN_UART_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = 
IRQ_UART_RX, + .end = IRQ_UART_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART_ERROR, + .end = IRQ_UART_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART_TX, + .end = CH_UART_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART_RX, + .end = CH_UART_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 }; -static struct platform_device bfin_uart_device = { +static struct platform_device bfin_uart0_device = { .name = "bfin-uart", - .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -463,7 +492,9 @@ static struct platform_device *cm_bf561_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c index ffd3e6a80d1a..c59041c513e0 100644 --- a/arch/blackfin/mach-bf561/boards/ezkit.c +++ b/arch/blackfin/mach-bf561/boards/ezkit.c @@ -160,21 +160,50 @@ static struct platform_device smc91x_device = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) -static struct resource bfin_uart_resources[] = { +#ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { { - .start = 0xFFC00400, - .end = 0xFFC004FF, + .start = BFIN_UART_THR, + .end = BFIN_UART_GCTL+2, .flags = IORESOURCE_MEM, }, + { + .start = IRQ_UART_RX, + .end = IRQ_UART_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART_ERROR, + .end = IRQ_UART_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART_TX, + .end = CH_UART_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART_RX, + .end = CH_UART_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 }; -static struct platform_device bfin_uart_device = { +static struct platform_device bfin_uart0_device = { .name = "bfin-uart", - .id = 1, - .num_resources = ARRAY_SIZE(bfin_uart_resources), - .resource = bfin_uart_resources, + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -412,7 +441,9 @@ static struct platform_device *ezkit_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) - &bfin_uart_device, +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) diff --git a/arch/blackfin/mach-bf561/boards/tepla.c b/arch/blackfin/mach-bf561/boards/tepla.c index 8ba7252455e1..6c33397355b3 100644 --- a/arch/blackfin/mach-bf561/boards/tepla.c +++ b/arch/blackfin/mach-bf561/boards/tepla.c @@ -42,6 +42,52 @@ static struct platform_device smc91x_device = { .resource = smc91x_resources, }; +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) +#ifdef CONFIG_SERIAL_BFIN_UART0 +static struct 
resource bfin_uart0_resources[] = { + { + .start = BFIN_UART_THR, + .end = BFIN_UART_GCTL+2, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_UART_RX, + .end = IRQ_UART_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART_ERROR, + .end = IRQ_UART_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART_TX, + .end = CH_UART_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART_RX, + .end = CH_UART_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 +}; + +static struct platform_device bfin_uart0_device = { + .name = "bfin-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, +}; +#endif +#endif + #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 static struct resource bfin_sir0_resources[] = { @@ -73,6 +119,13 @@ static struct platform_device bfin_sir0_device = { static struct platform_device *tepla_devices[] __initdata = { &smc91x_device, + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#endif + #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, -- cgit v1.2.3 From 08a54bffaf7d5dc9391518323234ce9f9ea4be8e Mon Sep 17 00:00:00 2001 From: Barry Song Date: Fri, 18 Sep 2009 09:14:38 +0000 Subject: Blackfin: bf537-stamp: ad1938: switch to GPIO CS Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 9842288c4f1c..554f82d6f9da 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -528,7 +528,6 @@ static struct bfin5xx_spi_chip ad1836_spi_chip_info = { static struct bfin5xx_spi_chip ad1938_spi_chip_info = { .enable_dma = 0, .bits_per_word = 8, - .cs_gpio = GPIO_PF5, }; #endif @@ -910,7 +909,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { .modalias = "ad1938", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, - .chip_select = 0,/* CONFIG_SND_BLACKFIN_SPI_PFBIT */ + .chip_select = 5, .controller_data = &ad1938_spi_chip_info, .mode = SPI_MODE_3, }, -- cgit v1.2.3 From df5de261306d9bfc1ed9121595593b10a7626b95 Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Wed, 23 Sep 2009 05:01:56 +0000 Subject: Blackfin: move SPORT UART resources to boards files Rather than keeping the pins in the actual driver and worrying about a mess of Kconfig options, declare all the desired pin resources in the boards file. This lets people easily select the specific pins/ports for the normal emulated UART as well as GPIOs for CTS/RTS. 
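
For reference, the per-board pattern the hunks below repeat looks roughly like this minimal sketch. The my_board_* names are invented for illustration only; the SPORT0_* register addresses, IRQ_SPORT0_* interrupt numbers, and P_SPORT0_* pin constants are assumed to come from the Blackfin mach and portmux headers that the board files already include.

/* Minimal sketch of the board-side SPORT-UART declaration, assuming
 * <linux/platform_device.h>, <linux/ioport.h> and the Blackfin
 * <asm/portmux.h>/mach headers (SPORT0_*, IRQ_SPORT0_*, P_SPORT0_*)
 * are already available in the board file.  my_board_* is hypothetical.
 */
static struct resource my_board_sport0_uart_resources[] = {
        {
                .start = SPORT0_TCR1,           /* SPORT0 register block */
                .end   = SPORT0_MRCS3 + 4,
                .flags = IORESOURCE_MEM,
        },
        {
                .start = IRQ_SPORT0_RX,         /* RX/TX interrupt pair */
                .end   = IRQ_SPORT0_RX + 1,
                .flags = IORESOURCE_IRQ,
        },
        {
                .start = IRQ_SPORT0_ERROR,      /* status/error interrupt */
                .end   = IRQ_SPORT0_ERROR,
                .flags = IORESOURCE_IRQ,
        },
};

/* Zero-terminated list of peripheral pins the emulated UART should claim. */
static unsigned short my_board_sport0_peripherals[] = {
        P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
        P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
};

static struct platform_device my_board_sport0_uart_device = {
        .name           = "bfin-sport-uart",
        .id             = 0,
        .num_resources  = ARRAY_SIZE(my_board_sport0_uart_resources),
        .resource       = my_board_sport0_uart_resources,
        .dev = {
                /* Handed to the driver, which reads the pin list from
                 * platform_data instead of hard-coding it behind Kconfig. */
                .platform_data = &my_board_sport0_peripherals,
        },
};

The device is then listed in the board's platform_device array under the matching CONFIG_SERIAL_BFIN_SPORTx_UART ifdef, and the driver is expected to claim the pins named in platform_data at probe time (presumably through the Blackfin portmux helpers such as peripheral_request_list()), which is what lets each board choose its own SPORT pins and CTS/RTS GPIOs.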
Signed-off-by: Sonic Zhang Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf518/boards/ezbrd.c | 63 +++++++++++ arch/blackfin/mach-bf527/boards/cm_bf527.c | 63 +++++++++++ arch/blackfin/mach-bf527/boards/ezbrd.c | 63 +++++++++++ arch/blackfin/mach-bf527/boards/ezkit.c | 63 +++++++++++ arch/blackfin/mach-bf533/boards/blackstamp.c | 63 +++++++++++ arch/blackfin/mach-bf533/boards/cm_bf533.c | 63 +++++++++++ arch/blackfin/mach-bf533/boards/stamp.c | 63 +++++++++++ arch/blackfin/mach-bf537/boards/cm_bf537e.c | 63 +++++++++++ arch/blackfin/mach-bf537/boards/cm_bf537u.c | 63 +++++++++++ arch/blackfin/mach-bf537/boards/minotaur.c | 63 +++++++++++ arch/blackfin/mach-bf537/boards/stamp.c | 67 +++++++++++- arch/blackfin/mach-bf537/boards/tcm_bf537.c | 63 +++++++++++ arch/blackfin/mach-bf538/boards/ezkit.c | 154 +++++++++++++++++++++++++++ arch/blackfin/mach-bf548/boards/cm_bf548.c | 154 +++++++++++++++++++++++++++ arch/blackfin/mach-bf548/boards/ezkit.c | 154 +++++++++++++++++++++++++++ 15 files changed, 1220 insertions(+), 2 deletions(-) diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c index 0d1b021ffa69..06e4316912d3 100644 --- a/arch/blackfin/mach-bf518/boards/ezbrd.c +++ b/arch/blackfin/mach-bf518/boards/ezbrd.c @@ -562,16 +562,75 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, + .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include @@ -678,9 +737,13 @@ static struct platform_device *stamp_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif +#endif #if 
defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) &bfin_device_gpiokeys, diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c index 2d93c1f520a2..3543bdf3268e 100644 --- a/arch/blackfin/mach-bf527/boards/cm_bf527.c +++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c @@ -800,16 +800,75 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, + .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include @@ -926,9 +985,13 @@ static struct platform_device *cmbf527_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif +#endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) &bfin_device_gpiokeys, diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c index 3a981e855668..a99955d11391 100644 --- a/arch/blackfin/mach-bf527/boards/ezbrd.c +++ b/arch/blackfin/mach-bf527/boards/ezbrd.c @@ -631,16 +631,75 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, 
P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, + .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include @@ -766,9 +825,13 @@ static struct platform_device *stamp_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif +#endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) &bfin_device_gpiokeys, diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index 0750e655fb22..f93323dd5b51 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -844,16 +844,75 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, + .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 }; static struct 
platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include @@ -1007,9 +1066,13 @@ static struct platform_device *stamp_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif +#endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) &bfin_device_gpiokeys, diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c index 50ce3ecfdcb2..4d9294642f10 100644 --- a/arch/blackfin/mach-bf533/boards/blackstamp.c +++ b/arch/blackfin/mach-bf533/boards/blackstamp.c @@ -270,16 +270,75 @@ static struct platform_device bfin_sir0_device = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, + .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include @@ -385,9 +444,13 @@ static struct platform_device *stamp_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif +#endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) &bfin_device_gpiokeys, diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c index f707faf8a864..33ff27a4a474 100644 --- 
a/arch/blackfin/mach-bf533/boards/cm_bf533.c +++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c @@ -317,16 +317,75 @@ static struct platform_device bfin_sir0_device = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, + .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) static struct resource isp1362_hcd_resources[] = { @@ -473,9 +532,13 @@ static struct platform_device *cm_bf533_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif +#endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c index 2683e5f2294f..428f724dd080 100644 --- a/arch/blackfin/mach-bf533/boards/stamp.c +++ b/arch/blackfin/mach-bf533/boards/stamp.c @@ -397,16 +397,75 @@ static struct platform_device bfin_sir0_device = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, + .num_resources = 
ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include @@ -536,9 +595,13 @@ static struct platform_device *stamp_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif +#endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) &bfin_device_gpiokeys, diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c index abb32ddc5122..b4837776b274 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c @@ -526,16 +526,75 @@ static struct platform_device i2c_bfin_twi_device = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, + .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = 
&bfin_sport1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) static struct platform_device bfin_mii_bus = { @@ -647,9 +706,13 @@ static struct platform_device *cm_bf537e_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif +#endif #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) &isp1362_hcd_device, diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c index b3182face957..c6802be65fb1 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c @@ -491,16 +491,75 @@ static struct platform_device i2c_bfin_twi_device = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, + .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) static struct platform_device bfin_mii_bus = { @@ -612,9 +671,13 @@ static struct platform_device *cm_bf537u_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif +#endif #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) &isp1362_hcd_device, diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c index 58d6a20e7a0d..6403964490d9 100644 --- a/arch/blackfin/mach-bf537/boards/minotaur.c +++ b/arch/blackfin/mach-bf537/boards/minotaur.c @@ -377,16 +377,75 @@ static struct platform_device i2c_bfin_twi_device = 
{ #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, + .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ + }, }; #endif +#endif static struct platform_device *minotaur_devices[] __initdata = { #if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) @@ -433,9 +492,13 @@ static struct platform_device *minotaur_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif +#endif }; diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 554f82d6f9da..df4b6b5c08d4 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -1677,16 +1677,75 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, + .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource 
bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) #define CF_IDE_NAND_CARD_USE_HDD_INTERFACE @@ -1867,9 +1926,13 @@ static struct platform_device *stamp_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif +#endif #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) &bfin_pata_device, @@ -1895,10 +1958,10 @@ static struct platform_device *stamp_devices[] __initdata = { static int __init stamp_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); - i2c_register_board_info(0, bfin_i2c_board_info, - ARRAY_SIZE(bfin_i2c_board_info)); bfin_plat_nand_init(); platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); + i2c_register_board_info(0, bfin_i2c_board_info, + ARRAY_SIZE(bfin_i2c_board_info)); spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); return 0; diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c index d77ca5b75685..69a2fb804724 100644 --- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c +++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c @@ -493,16 +493,75 @@ static struct platform_device i2c_bfin_twi_device = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, + .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + P_SPORT1_TFS, P_SPORT1_DTPRI, 
P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ + }, }; #endif +#endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) static struct platform_device bfin_mii_bus = { @@ -614,9 +673,13 @@ static struct platform_device *cm_bf537_devices[] __initdata = { #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif +#endif #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) &isp1362_hcd_device, diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c index 1912bc4555aa..d7c6b9f18b5e 100644 --- a/arch/blackfin/mach-bf538/boards/ezkit.c +++ b/arch/blackfin/mach-bf538/boards/ezkit.c @@ -262,6 +262,145 @@ static struct platform_device bfin_sir2_device = { #endif #endif +#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + +static struct platform_device bfin_sport0_uart_device = { + .name = "bfin-sport-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 +}; + +static struct platform_device bfin_sport1_uart_device = { + .name = "bfin-sport-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART +static struct resource bfin_sport2_uart_resources[] = { + { + .start = SPORT2_TCR1, + .end = SPORT2_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT2_RX, + .end = IRQ_SPORT2_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT2_ERROR, + .end = IRQ_SPORT2_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport2_peripherals[] = { + P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, + P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, 
P_SPORT2_DTSEC, 0 +}; + +static struct platform_device bfin_sport2_uart_device = { + .name = "bfin-sport-uart", + .id = 2, + .num_resources = ARRAY_SIZE(bfin_sport2_uart_resources), + .resource = bfin_sport2_uart_resources, + .dev = { + .platform_data = &bfin_sport2_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART +static struct resource bfin_sport3_uart_resources[] = { + { + .start = SPORT3_TCR1, + .end = SPORT3_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT3_RX, + .end = IRQ_SPORT3_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT3_ERROR, + .end = IRQ_SPORT3_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport3_peripherals[] = { + P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, + P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0 +}; + +static struct platform_device bfin_sport3_uart_device = { + .name = "bfin-sport-uart", + .id = 3, + .num_resources = ARRAY_SIZE(bfin_sport3_uart_resources), + .resource = bfin_sport3_uart_resources, + .dev = { + .platform_data = &bfin_sport3_peripherals, /* Passed to driver */ + }, +}; +#endif +#endif + #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) unsigned short bfin_can_peripherals[] = { P_CAN0_RX, P_CAN0_TX, 0 @@ -767,6 +906,21 @@ static struct platform_device *cm_bf538_devices[] __initdata = { #endif #endif +#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART + &bfin_sport2_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART + &bfin_sport3_uart_device, +#endif +#endif + #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) &bfin_can_device, #endif diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c index 635f92723e12..cb7458792196 100644 --- a/arch/blackfin/mach-bf548/boards/cm_bf548.c +++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c @@ -526,6 +526,145 @@ static struct platform_device musb_device = { }; #endif +#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + +static struct platform_device bfin_sport0_uart_device = { + .name = "bfin-sport-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + 
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 +}; + +static struct platform_device bfin_sport1_uart_device = { + .name = "bfin-sport-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART +static struct resource bfin_sport2_uart_resources[] = { + { + .start = SPORT2_TCR1, + .end = SPORT2_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT2_RX, + .end = IRQ_SPORT2_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT2_ERROR, + .end = IRQ_SPORT2_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport2_peripherals[] = { + P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, + P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0 +}; + +static struct platform_device bfin_sport2_uart_device = { + .name = "bfin-sport-uart", + .id = 2, + .num_resources = ARRAY_SIZE(bfin_sport2_uart_resources), + .resource = bfin_sport2_uart_resources, + .dev = { + .platform_data = &bfin_sport2_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART +static struct resource bfin_sport3_uart_resources[] = { + { + .start = SPORT3_TCR1, + .end = SPORT3_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT3_RX, + .end = IRQ_SPORT3_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT3_ERROR, + .end = IRQ_SPORT3_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport3_peripherals[] = { + P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, + P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0 +}; + +static struct platform_device bfin_sport3_uart_device = { + .name = "bfin-sport-uart", + .id = 3, + .num_resources = ARRAY_SIZE(bfin_sport3_uart_resources), + .resource = bfin_sport3_uart_resources, + .dev = { + .platform_data = &bfin_sport3_peripherals, /* Passed to driver */ + }, +}; +#endif +#endif + #if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE) static struct resource bfin_atapi_resources[] = { { @@ -960,6 +1099,21 @@ static struct platform_device *cm_bf548_devices[] __initdata = { &musb_device, #endif +#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART + &bfin_sport2_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART + &bfin_sport3_uart_device, +#endif +#endif + #if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE) &bfin_atapi_device, #endif diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c index 1ed026157737..97033fea102e 100644 --- a/arch/blackfin/mach-bf548/boards/ezkit.c +++ b/arch/blackfin/mach-bf548/boards/ezkit.c @@ -631,6 +631,145 @@ static struct platform_device musb_device = { }; #endif +#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = 
IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + +static struct platform_device bfin_sport0_uart_device = { + .name = "bfin-sport-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 +}; + +static struct platform_device bfin_sport1_uart_device = { + .name = "bfin-sport-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART +static struct resource bfin_sport2_uart_resources[] = { + { + .start = SPORT2_TCR1, + .end = SPORT2_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT2_RX, + .end = IRQ_SPORT2_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT2_ERROR, + .end = IRQ_SPORT2_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport2_peripherals[] = { + P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, + P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0 +}; + +static struct platform_device bfin_sport2_uart_device = { + .name = "bfin-sport-uart", + .id = 2, + .num_resources = ARRAY_SIZE(bfin_sport2_uart_resources), + .resource = bfin_sport2_uart_resources, + .dev = { + .platform_data = &bfin_sport2_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART +static struct resource bfin_sport3_uart_resources[] = { + { + .start = SPORT3_TCR1, + .end = SPORT3_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT3_RX, + .end = IRQ_SPORT3_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT3_ERROR, + .end = IRQ_SPORT3_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport3_peripherals[] = { + P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, + P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0 +}; + +static struct platform_device bfin_sport3_uart_device = { + .name = "bfin-sport-uart", + .id = 3, + .num_resources = ARRAY_SIZE(bfin_sport3_uart_resources), + .resource = bfin_sport3_uart_resources, + .dev = { + .platform_data = &bfin_sport3_peripherals, /* Passed to driver */ + }, +}; +#endif +#endif + #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) unsigned short bfin_can_peripherals[] = { P_CAN0_RX, P_CAN0_TX, 0 @@ -1172,6 +1311,21 @@ static struct platform_device *ezkit_devices[] __initdata = { &bfin_isp1760_device, #endif +#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, 
+#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART + &bfin_sport2_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART + &bfin_sport3_uart_device, +#endif +#endif + #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) &bfin_can_device, #endif -- cgit v1.2.3 From b635f1912da764c960c044ffd58ba27157aa2c85 Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Wed, 23 Sep 2009 08:06:25 +0000 Subject: Blackfin: allow boards to register early devices Signed-off-by: Sonic Zhang Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/setup.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c index 95448ae9c43a..69cbc1a81201 100644 --- a/arch/blackfin/kernel/setup.c +++ b/arch/blackfin/kernel/setup.c @@ -792,10 +792,17 @@ static inline int __init get_mem_size(void) BUG(); } +__attribute__((weak)) +void __init native_machine_early_platform_add_devices(void) +{ +} + void __init setup_arch(char **cmdline_p) { unsigned long sclk, cclk; + native_machine_early_platform_add_devices(); + enable_shadow_console(); /* Check to make sure we are running on the right processor */ -- cgit v1.2.3 From c13ce9fd26c3a0e32b3bf0b00929181e66114ed2 Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Wed, 23 Sep 2009 09:37:46 +0000 Subject: Blackfin: add UART/SPORT early platform resources This lets people easily select the UART/SPORT consoles for early printk while leveraging the pins declared in the boards file. Signed-off-by: Sonic Zhang Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf518/boards/ezbrd.c | 27 +++++++++++++++++++ arch/blackfin/mach-bf527/boards/cm_bf527.c | 27 +++++++++++++++++++ arch/blackfin/mach-bf527/boards/ezbrd.c | 27 +++++++++++++++++++ arch/blackfin/mach-bf527/boards/ezkit.c | 27 +++++++++++++++++++ arch/blackfin/mach-bf533/boards/H8606.c | 15 +++++++++++ arch/blackfin/mach-bf533/boards/blackstamp.c | 24 +++++++++++++++++ arch/blackfin/mach-bf533/boards/cm_bf533.c | 24 +++++++++++++++++ arch/blackfin/mach-bf533/boards/ezkit.c | 15 +++++++++++ arch/blackfin/mach-bf533/boards/ip0x.c | 15 +++++++++++ arch/blackfin/mach-bf533/boards/stamp.c | 24 +++++++++++++++++ arch/blackfin/mach-bf537/boards/cm_bf537e.c | 27 +++++++++++++++++++ arch/blackfin/mach-bf537/boards/cm_bf537u.c | 27 +++++++++++++++++++ arch/blackfin/mach-bf537/boards/minotaur.c | 27 +++++++++++++++++++ arch/blackfin/mach-bf537/boards/pnav10.c | 18 +++++++++++++ arch/blackfin/mach-bf537/boards/stamp.c | 28 ++++++++++++++++++++ arch/blackfin/mach-bf537/boards/tcm_bf537.c | 27 +++++++++++++++++++ arch/blackfin/mach-bf538/boards/ezkit.c | 36 +++++++++++++++++++++++++ arch/blackfin/mach-bf548/boards/cm_bf548.c | 39 ++++++++++++++++++++++++++++ arch/blackfin/mach-bf548/boards/ezkit.c | 39 ++++++++++++++++++++++++++++ arch/blackfin/mach-bf561/boards/cm_bf561.c | 15 +++++++++++ arch/blackfin/mach-bf561/boards/ezkit.c | 15 +++++++++++ arch/blackfin/mach-bf561/boards/tepla.c | 15 +++++++++++ 22 files changed, 538 insertions(+) diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c index 06e4316912d3..44d6d5299022 100644 --- a/arch/blackfin/mach-bf518/boards/ezbrd.c +++ b/arch/blackfin/mach-bf518/boards/ezbrd.c @@ -775,6 +775,33 @@ static int __init ezbrd_init(void) arch_initcall(ezbrd_init); +static struct platform_device *ezbrd_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + 
&bfin_uart1_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(ezbrd_early_devices, + ARRAY_SIZE(ezbrd_early_devices)); +} + void native_machine_restart(char *cmd) { /* workaround reboot hang when booting from SPI */ diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c index 3543bdf3268e..eaa0131f9605 100644 --- a/arch/blackfin/mach-bf527/boards/cm_bf527.c +++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c @@ -1014,6 +1014,33 @@ static int __init cm_init(void) arch_initcall(cm_init); +static struct platform_device *cmbf527_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(cmbf527_early_devices, + ARRAY_SIZE(cmbf527_early_devices)); +} + void native_machine_restart(char *cmd) { /* workaround reboot hang when booting from SPI */ diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c index a99955d11391..3ff61e6fbe95 100644 --- a/arch/blackfin/mach-bf527/boards/ezbrd.c +++ b/arch/blackfin/mach-bf527/boards/ezbrd.c @@ -854,6 +854,33 @@ static int __init ezbrd_init(void) arch_initcall(ezbrd_init); +static struct platform_device *ezbrd_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(ezbrd_early_devices, + ARRAY_SIZE(ezbrd_early_devices)); +} + void native_machine_restart(char *cmd) { /* workaround reboot hang when booting from SPI */ diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index f93323dd5b51..6138b47fbbaf 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -1099,6 +1099,33 @@ static int __init ezkit_init(void) arch_initcall(ezkit_init); +static struct platform_device *ezkit_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, 
+#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(ezkit_early_devices, + ARRAY_SIZE(ezkit_early_devices)); +} + void native_machine_restart(char *cmd) { /* workaround reboot hang when booting from SPI */ diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c index cb3b74f32346..d10c8e8b69cf 100644 --- a/arch/blackfin/mach-bf533/boards/H8606.c +++ b/arch/blackfin/mach-bf533/boards/H8606.c @@ -455,3 +455,18 @@ static int __init H8606_init(void) } arch_initcall(H8606_init); + +static struct platform_device *H8606_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(H8606_early_devices, + ARRAY_SIZE(H8606_early_devices)); +} diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c index 4d9294642f10..842b4fa76ea9 100644 --- a/arch/blackfin/mach-bf533/boards/blackstamp.c +++ b/arch/blackfin/mach-bf533/boards/blackstamp.c @@ -486,3 +486,27 @@ static int __init blackstamp_init(void) } arch_initcall(blackstamp_init); + +static struct platform_device *stamp_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(stamp_early_devices, + ARRAY_SIZE(stamp_early_devices)); +} diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c index 33ff27a4a474..0592fe0bb8af 100644 --- a/arch/blackfin/mach-bf533/boards/cm_bf533.c +++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c @@ -580,3 +580,27 @@ static int __init cm_bf533_init(void) } arch_initcall(cm_bf533_init); + +static struct platform_device *cm_bf533_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(cm_bf533_early_devices, + ARRAY_SIZE(cm_bf533_early_devices)); +} diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c index 50392476722a..5332c9828a33 100644 --- a/arch/blackfin/mach-bf533/boards/ezkit.c +++ b/arch/blackfin/mach-bf533/boards/ezkit.c @@ -531,3 +531,18 @@ static int __init ezkit_init(void) } arch_initcall(ezkit_init); + +static struct platform_device *ezkit_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#endif 
+}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(ezkit_early_devices, + ARRAY_SIZE(ezkit_early_devices)); +} diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c index a60d0369ceb7..7349970db978 100644 --- a/arch/blackfin/mach-bf533/boards/ip0x.c +++ b/arch/blackfin/mach-bf533/boards/ip0x.c @@ -308,3 +308,18 @@ static int __init ip0x_init(void) } arch_initcall(ip0x_init); + +static struct platform_device *ip0x_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(ip0x_early_devices, + ARRAY_SIZE(ip0x_early_devices)); +} diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c index 428f724dd080..3e80a795d3c7 100644 --- a/arch/blackfin/mach-bf533/boards/stamp.c +++ b/arch/blackfin/mach-bf533/boards/stamp.c @@ -642,6 +642,30 @@ static int __init stamp_init(void) arch_initcall(stamp_init); +static struct platform_device *stamp_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(stamp_early_devices, + ARRAY_SIZE(stamp_early_devices)); +} + void native_machine_restart(char *cmd) { /* workaround pull up on cpld / flash pin not being strong enough */ diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c index b4837776b274..c54331282baa 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c @@ -760,6 +760,33 @@ static int __init cm_bf537e_init(void) arch_initcall(cm_bf537e_init); +static struct platform_device *cm_bf537e_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(cm_bf537e_early_devices, + ARRAY_SIZE(cm_bf537e_early_devices)); +} + void bfin_get_ether_addr(char *addr) { random_ether_addr(addr); diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c index c6802be65fb1..18daacb00b2c 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c @@ -725,6 +725,33 @@ static int __init cm_bf537u_init(void) arch_initcall(cm_bf537u_init); +static struct platform_device *cm_bf537u_early_devices[] __initdata = { +#if 
defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(cm_bf537u_early_devices, + ARRAY_SIZE(cm_bf537u_early_devices)); +} + void bfin_get_ether_addr(char *addr) { random_ether_addr(addr); diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c index 6403964490d9..c489d602c590 100644 --- a/arch/blackfin/mach-bf537/boards/minotaur.c +++ b/arch/blackfin/mach-bf537/boards/minotaur.c @@ -516,6 +516,33 @@ static int __init minotaur_init(void) arch_initcall(minotaur_init); +static struct platform_device *minotaur_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(minotaur_early_devices, + ARRAY_SIZE(minotaur_early_devices)); +} + void native_machine_restart(char *cmd) { /* workaround reboot hang when booting from SPI */ diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c index 3c36abdf3405..f08533d4aee8 100644 --- a/arch/blackfin/mach-bf537/boards/pnav10.c +++ b/arch/blackfin/mach-bf537/boards/pnav10.c @@ -571,6 +571,24 @@ static int __init pnav_init(void) arch_initcall(pnav_init); +static struct platform_device *stamp_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(stamp_early_devices, + ARRAY_SIZE(stamp_early_devices)); +} + void bfin_get_ether_addr(char *addr) { random_ether_addr(addr); diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index df4b6b5c08d4..3cb20d7efbdb 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -1969,6 +1969,34 @@ static int __init stamp_init(void) arch_initcall(stamp_init); + +static struct platform_device *stamp_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#endif +}; + +void __init 
native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(stamp_early_devices, + ARRAY_SIZE(stamp_early_devices)); +} + void native_machine_restart(char *cmd) { /* workaround reboot hang when booting from SPI */ diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c index 69a2fb804724..59951cdef588 100644 --- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c +++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c @@ -727,6 +727,33 @@ static int __init tcm_bf537_init(void) arch_initcall(tcm_bf537_init); +static struct platform_device *cm_bf537_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(cm_bf537_early_devices, + ARRAY_SIZE(cm_bf537_early_devices)); +} + void bfin_get_ether_addr(char *addr) { random_ether_addr(addr); diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c index d7c6b9f18b5e..20387fe09c61 100644 --- a/arch/blackfin/mach-bf538/boards/ezkit.c +++ b/arch/blackfin/mach-bf538/boards/ezkit.c @@ -956,3 +956,39 @@ static int __init ezkit_init(void) } arch_initcall(ezkit_init); + +static struct platform_device *ezkit_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART2 + &bfin_uart2_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART + &bfin_sport2_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART + &bfin_sport3_uart_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(ezkit_early_devices, + ARRAY_SIZE(ezkit_early_devices)); +} diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c index cb7458792196..f60c333fec66 100644 --- a/arch/blackfin/mach-bf548/boards/cm_bf548.c +++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c @@ -1165,3 +1165,42 @@ static int __init cm_bf548_init(void) } arch_initcall(cm_bf548_init); + +static struct platform_device *cm_bf548_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART2 + &bfin_uart2_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART3 + &bfin_uart3_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, 
+#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART + &bfin_sport2_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART + &bfin_sport3_uart_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(cm_bf548_early_devices, + ARRAY_SIZE(cm_bf548_early_devices)); +} diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c index 97033fea102e..b5d8ea187735 100644 --- a/arch/blackfin/mach-bf548/boards/ezkit.c +++ b/arch/blackfin/mach-bf548/boards/ezkit.c @@ -1390,3 +1390,42 @@ static int __init ezkit_init(void) } arch_initcall(ezkit_init); + +static struct platform_device *ezkit_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART2 + &bfin_uart2_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART3 + &bfin_uart3_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART + &bfin_sport2_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART + &bfin_sport3_uart_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(ezkit_early_devices, + ARRAY_SIZE(ezkit_early_devices)); +} diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c index 818266f6348f..37dd37523adf 100644 --- a/arch/blackfin/mach-bf561/boards/cm_bf561.c +++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c @@ -547,3 +547,18 @@ static int __init cm_bf561_init(void) } arch_initcall(cm_bf561_init); + +static struct platform_device *cm_bf561_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(cm_bf561_early_devices, + ARRAY_SIZE(cm_bf561_early_devices)); +} diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c index c59041c513e0..c7a5db79f838 100644 --- a/arch/blackfin/mach-bf561/boards/ezkit.c +++ b/arch/blackfin/mach-bf561/boards/ezkit.c @@ -489,3 +489,18 @@ static int __init ezkit_init(void) } arch_initcall(ezkit_init); + +static struct platform_device *ezkit_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(ezkit_early_devices, + ARRAY_SIZE(ezkit_early_devices)); +} diff --git a/arch/blackfin/mach-bf561/boards/tepla.c b/arch/blackfin/mach-bf561/boards/tepla.c index 6c33397355b3..d3017e53686b 100644 --- a/arch/blackfin/mach-bf561/boards/tepla.c +++ b/arch/blackfin/mach-bf561/boards/tepla.c @@ -140,3 +140,18 @@ static int __init tepla_init(void) } arch_initcall(tepla_init); + +static struct platform_device 
*tepla_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(tepla_early_devices, + ARRAY_SIZE(tepla_early_devices)); +} -- cgit v1.2.3 From 3630ac34b0ab346ff0910401dbed7af624be7027 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Tue, 17 Nov 2009 04:26:22 +0000 Subject: Blackfin: bfin_sport: unify & standardize SPORT masks Rather than have every SPORT driver copy & paste things, declare the C structure and MMR bitmasks in one place for everyone to use. Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/bfin_sport.h | 227 +++++++++++++++++------------ arch/blackfin/include/asm/def_LPBlackfin.h | 2 + 2 files changed, 132 insertions(+), 97 deletions(-) diff --git a/arch/blackfin/include/asm/bfin_sport.h b/arch/blackfin/include/asm/bfin_sport.h index b558908e1c79..9626cf7e4251 100644 --- a/arch/blackfin/include/asm/bfin_sport.h +++ b/arch/blackfin/include/asm/bfin_sport.h @@ -1,7 +1,7 @@ /* - * bfin_sport.h - userspace header for bfin sport driver + * bfin_sport.h - interface to Blackfin SPORTs * - * Copyright 2004-2008 Analog Devices Inc. + * Copyright 2004-2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ @@ -9,16 +9,6 @@ #ifndef __BFIN_SPORT_H__ #define __BFIN_SPORT_H__ -#ifdef __KERNEL__ -#include -#include -#include -#include -#endif - -#define SPORT_MAJOR 237 -#define SPORT_NR_DEVS 2 - /* Sport mode: it can be set to TDM, i2s or others */ #define NORM_MODE 0x0 #define TDM_MODE 0x1 @@ -35,7 +25,7 @@ struct sport_config { unsigned int mode:3; /* if TDM mode is selected, channels must be set */ - int channels; /* Must be in 8 units */ + int channels; /* Must be in 8 units */ unsigned int frame_delay:4; /* Delay between frame sync pulse and first bit */ /* I2S mode */ @@ -69,94 +59,137 @@ struct sport_config { #ifdef __KERNEL__ -struct sport_register { - unsigned short tcr1; - unsigned short reserved0; - unsigned short tcr2; - unsigned short reserved1; - unsigned short tclkdiv; - unsigned short reserved2; - unsigned short tfsdiv; - unsigned short reserved3; - unsigned long tx; - unsigned long reserved_l0; - unsigned long rx; - unsigned long reserved_l1; - unsigned short rcr1; - unsigned short reserved4; - unsigned short rcr2; - unsigned short reserved5; - unsigned short rclkdiv; - unsigned short reserved6; - unsigned short rfsdiv; - unsigned short reserved7; - unsigned short stat; - unsigned short reserved8; - unsigned short chnl; - unsigned short reserved9; - unsigned short mcmc1; - unsigned short reserved10; - unsigned short mcmc2; - unsigned short reserved11; - unsigned long mtcs0; - unsigned long mtcs1; - unsigned long mtcs2; - unsigned long mtcs3; - unsigned long mrcs0; - unsigned long mrcs1; - unsigned long mrcs2; - unsigned long mrcs3; -}; - -struct sport_dev { - struct cdev cdev; /* Char device structure */ - - int sport_num; +#include - int dma_rx_chan; - int dma_tx_chan; - - int rx_irq; - unsigned char *rx_buf; /* Buffer store the received data */ - int rx_len; /* How many bytes will be received */ - int rx_received; /* How many bytes has been received */ - - int tx_irq; - const unsigned char *tx_buf; - int tx_len; - int tx_sent; - - int err_irq; - - struct mutex mutex; /* mutual exclusion semaphore */ - struct task_struct *task; - - wait_queue_head_t waitq; - int wait_con; - 
struct sport_register *regs; - struct sport_config config; +/* + * All Blackfin system MMRs are padded to 32bits even if the register + * itself is only 16bits. So use a helper macro to streamline this. + */ +#define __BFP(m) u16 m; u16 __pad_##m +struct sport_register { + __BFP(tcr1); + __BFP(tcr2); + __BFP(tclkdiv); + __BFP(tfsdiv); + union { + u32 tx32; + u16 tx16; + }; + u32 __pad_tx; + union { + u32 rx32; /* use the anomaly wrapper below */ + u16 rx16; + }; + u32 __pad_rx; + __BFP(rcr1); + __BFP(rcr2); + __BFP(rclkdiv); + __BFP(rfsdiv); + __BFP(stat); + __BFP(chnl); + __BFP(mcmc1); + __BFP(mcmc2); + u32 mtcs0; + u32 mtcs1; + u32 mtcs2; + u32 mtcs3; + u32 mrcs0; + u32 mrcs1; + u32 mrcs2; + u32 mrcs3; }; +#undef __BFP + +#define bfin_read_sport_rx32(base) \ +({ \ + struct sport_register *__mmrs = (void *)base; \ + u32 __ret; \ + unsigned long flags; \ + if (ANOMALY_05000473) \ + local_irq_save(flags); \ + __ret = __mmrs->rx32; \ + if (ANOMALY_05000473) \ + local_irq_restore(flags); \ + __ret; \ +}) #endif -#define SPORT_TCR1 0 -#define SPORT_TCR2 1 -#define SPORT_TCLKDIV 2 -#define SPORT_TFSDIV 3 -#define SPORT_RCR1 8 -#define SPORT_RCR2 9 -#define SPORT_RCLKDIV 10 -#define SPORT_RFSDIV 11 -#define SPORT_CHANNEL 13 -#define SPORT_MCMC1 14 -#define SPORT_MCMC2 15 -#define SPORT_MTCS0 16 -#define SPORT_MTCS1 17 -#define SPORT_MTCS2 18 -#define SPORT_MTCS3 19 -#define SPORT_MRCS0 20 -#define SPORT_MRCS1 21 -#define SPORT_MRCS2 22 -#define SPORT_MRCS3 23 +/* Workaround defBF*.h SPORT MMRs till they get cleansed */ +#undef DTYPE_NORM +#undef SLEN +#undef SP_WOFF +#undef SP_WSIZE + +/* SPORT_TCR1 Masks */ +#define TSPEN 0x0001 /* TX enable */ +#define ITCLK 0x0002 /* Internal TX Clock Select */ +#define TDTYPE 0x000C /* TX Data Formatting Select */ +#define DTYPE_NORM 0x0000 /* Data Format Normal */ +#define DTYPE_ULAW 0x0008 /* Compand Using u-Law */ +#define DTYPE_ALAW 0x000C /* Compand Using A-Law */ +#define TLSBIT 0x0010 /* TX Bit Order */ +#define ITFS 0x0200 /* Internal TX Frame Sync Select */ +#define TFSR 0x0400 /* TX Frame Sync Required Select */ +#define DITFS 0x0800 /* Data Independent TX Frame Sync Select */ +#define LTFS 0x1000 /* Low TX Frame Sync Select */ +#define LATFS 0x2000 /* Late TX Frame Sync Select */ +#define TCKFE 0x4000 /* TX Clock Falling Edge Select */ + +/* SPORT_TCR2 Masks */ +#define SLEN 0x001F /* SPORT TX Word Length (2 - 31) */ +#define DP_SLEN(x) BFIN_DEPOSIT(SLEN, x) +#define EX_SLEN(x) BFIN_EXTRACT(SLEN, x) +#define TXSE 0x0100 /* TX Secondary Enable */ +#define TSFSE 0x0200 /* TX Stereo Frame Sync Enable */ +#define TRFST 0x0400 /* TX Right-First Data Order */ + +/* SPORT_RCR1 Masks */ +#define RSPEN 0x0001 /* RX enable */ +#define IRCLK 0x0002 /* Internal RX Clock Select */ +#define RDTYPE 0x000C /* RX Data Formatting Select */ +/* DTYPE_* defined above */ +#define RLSBIT 0x0010 /* RX Bit Order */ +#define IRFS 0x0200 /* Internal RX Frame Sync Select */ +#define RFSR 0x0400 /* RX Frame Sync Required Select */ +#define LRFS 0x1000 /* Low RX Frame Sync Select */ +#define LARFS 0x2000 /* Late RX Frame Sync Select */ +#define RCKFE 0x4000 /* RX Clock Falling Edge Select */ + +/* SPORT_RCR2 Masks */ +/* SLEN defined above */ +#define RXSE 0x0100 /* RX Secondary Enable */ +#define RSFSE 0x0200 /* RX Stereo Frame Sync Enable */ +#define RRFST 0x0400 /* Right-First Data Order */ + +/* SPORT_STAT Masks */ +#define RXNE 0x0001 /* RX FIFO Not Empty Status */ +#define RUVF 0x0002 /* RX Underflow Status */ +#define ROVF 0x0004 /* RX Overflow Status */ +#define TXF 
0x0008 /* TX FIFO Full Status */ +#define TUVF 0x0010 /* TX Underflow Status */ +#define TOVF 0x0020 /* TX Overflow Status */ +#define TXHRE 0x0040 /* TX Hold Register Empty */ + +/* SPORT_MCMC1 Masks */ +#define SP_WOFF 0x03FF /* Multichannel Window Offset Field */ +#define DP_SP_WOFF(x) BFIN_DEPOSIT(SP_WOFF, x) +#define EX_SP_WOFF(x) BFIN_EXTRACT(SP_WOFF, x) +#define SP_WSIZE 0xF000 /* Multichannel Window Size Field */ +#define DP_SP_WSIZE(x) BFIN_DEPOSIT(SP_WSIZE, x) +#define EX_SP_WSIZE(x) BFIN_EXTRACT(SP_WSIZE, x) + +/* SPORT_MCMC2 Masks */ +#define MCCRM 0x0003 /* Multichannel Clock Recovery Mode */ +#define REC_BYPASS 0x0000 /* Bypass Mode (No Clock Recovery) */ +#define REC_2FROM4 0x0002 /* Recover 2 MHz Clock from 4 MHz Clock */ +#define REC_8FROM16 0x0003 /* Recover 8 MHz Clock from 16 MHz Clock */ +#define MCDTXPE 0x0004 /* Multichannel DMA Transmit Packing */ +#define MCDRXPE 0x0008 /* Multichannel DMA Receive Packing */ +#define MCMEN 0x0010 /* Multichannel Frame Mode Enable */ +#define FSDR 0x0080 /* Multichannel Frame Sync to Data Relationship */ +#define MFD 0xF000 /* Multichannel Frame Delay */ +#define DP_MFD(x) BFIN_DEPOSIT(MFD, x) +#define EX_MFD(x) BFIN_EXTRACT(MFD, x) #endif diff --git a/arch/blackfin/include/asm/def_LPBlackfin.h b/arch/blackfin/include/asm/def_LPBlackfin.h index 25906468622f..e521f8eb260a 100644 --- a/arch/blackfin/include/asm/def_LPBlackfin.h +++ b/arch/blackfin/include/asm/def_LPBlackfin.h @@ -12,6 +12,8 @@ #include #define MK_BMSK_(x) (1<> __ffs(mask)) #ifndef __ASSEMBLY__ -- cgit v1.2.3 From c9784ebb23be1e2ef23f537d6df04e0ea0206802 Mon Sep 17 00:00:00 2001 From: Yi Li Date: Fri, 4 Dec 2009 06:56:21 +0000 Subject: Blackfin: flush caches on SMP when one core calls another via IPI Sometimes a SMP system will randomly panic at boot. This is due to caches being out of sync when one core tries to signal the other. So when one core calls another via IPI, flush the data caches. Signed-off-by: Yi Li Signed-off-by: Mike Frysinger --- arch/blackfin/mach-common/smp.c | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index 369e687582b7..eddb720c718e 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c @@ -122,9 +122,17 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) wait = msg->call_struct.wait; cpu_clear(cpu, msg->call_struct.pending); func(info); - if (wait) + if (wait) { +#ifdef __ARCH_SYNC_CORE_DCACHE + /* + * 'wait' usually means synchronization between CPUs. + * Invalidate D cache in case shared data was changed + * by func() to ensure cache coherence. + */ + resync_core_dcache(); +#endif cpu_clear(cpu, msg->call_struct.waitmask); - else + } else kfree(msg); } @@ -219,6 +227,13 @@ int smp_call_function(void (*func)(void *info), void *info, int wait) blackfin_dcache_invalidate_range( (unsigned long)(&msg->call_struct.waitmask), (unsigned long)(&msg->call_struct.waitmask)); +#ifdef __ARCH_SYNC_CORE_DCACHE + /* + * Invalidate D cache in case shared data was changed by + * other processors to ensure cache coherence. 
+ */ + resync_core_dcache(); +#endif kfree(msg); } return 0; @@ -261,6 +276,13 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, blackfin_dcache_invalidate_range( (unsigned long)(&msg->call_struct.waitmask), (unsigned long)(&msg->call_struct.waitmask)); +#ifdef __ARCH_SYNC_CORE_DCACHE + /* + * Invalidate D cache in case shared data was changed by + * other processors to ensure cache coherence. + */ + resync_core_dcache(); +#endif kfree(msg); } return 0; -- cgit v1.2.3 From e18e7dd33454f277b9438af66d25984362278021 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Mon, 7 Dec 2009 10:05:58 +0000 Subject: Blackfin: fix MPU page permission masks overflow when dealing with async memory Attempting to use the MPU while doing XIP out of parallel flash hooked up to the async memory bus would often result in random crashes as the MPU slowly corrupted memory. The fallout here is that the async banks gain MPU protection from user space too. So any accesses have to go through the mmap() interface rather than just using hardcoded pointers. Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/mmu_context.h | 14 +++++++++++--- arch/blackfin/kernel/cplb-mpu/cplbmgr.c | 30 ++++++++++++++++++++++-------- arch/blackfin/kernel/setup.c | 5 +++++ 3 files changed, 38 insertions(+), 11 deletions(-) diff --git a/arch/blackfin/include/asm/mmu_context.h b/arch/blackfin/include/asm/mmu_context.h index ae8ef4ffd806..7f363d7e43a5 100644 --- a/arch/blackfin/include/asm/mmu_context.h +++ b/arch/blackfin/include/asm/mmu_context.h @@ -13,6 +13,7 @@ #include #include #include +#include /* Note: L1 stacks are CPU-private things, so we bluntly disable this feature in SMP mode, and use the per-CPU scratch SRAM bank only to @@ -117,9 +118,16 @@ static inline void protect_page(struct mm_struct *mm, unsigned long addr, unsigned long flags) { unsigned long *mask = mm->context.page_rwx_mask; - unsigned long page = addr >> 12; - unsigned long idx = page >> 5; - unsigned long bit = 1 << (page & 31); + unsigned long page; + unsigned long idx; + unsigned long bit; + + if (unlikely(addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)) + page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> 12; + else + page = addr >> 12; + idx = page >> 5; + bit = 1 << (page & 31); if (flags & VM_READ) mask[idx] |= bit; diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c index 930c01c06813..d4cc53a0ef89 100644 --- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c +++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c @@ -114,10 +114,15 @@ static noinline int dcplb_miss(unsigned int cpu) d_data = L2_DMEMORY; } else if (addr >= physical_mem_end) { if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) { - addr &= ~(4 * 1024 * 1024 - 1); - d_data &= ~PAGE_SIZE_4KB; - d_data |= PAGE_SIZE_4MB; - d_data |= CPLB_USER_RD | CPLB_USER_WR; + mask = current_rwx_mask[cpu]; + if (mask) { + int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT; + int idx = page >> 5; + int bit = 1 << (page & 31); + + if (mask[idx] & bit) + d_data |= CPLB_USER_RD; + } } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) { addr &= ~(1 * 1024 * 1024 - 1); @@ -204,10 +209,19 @@ static noinline int icplb_miss(unsigned int cpu) i_data = L2_IMEMORY; } else if (addr >= physical_mem_end) { if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) { - addr &= ~(4 * 
1024 * 1024 - 1); - i_data &= ~PAGE_SIZE_4KB; - i_data |= PAGE_SIZE_4MB; - i_data |= CPLB_USER_RD; + if (!(status & FAULT_USERSUPV)) { + unsigned long *mask = current_rwx_mask[cpu]; + + if (mask) { + int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT; + int idx = page >> 5; + int bit = 1 << (page & 31); + + mask += 2 * page_mask_nelts; + if (mask[idx] & bit) + i_data |= CPLB_USER_RD; + } + } } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH && (status & FAULT_USERSUPV)) { addr &= ~(1 * 1024 * 1024 - 1); diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c index 69cbc1a81201..8dc7ee1ef336 100644 --- a/arch/blackfin/kernel/setup.c +++ b/arch/blackfin/kernel/setup.c @@ -597,7 +597,12 @@ static __init void memory_setup(void) } #ifdef CONFIG_MPU +#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM) + page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE - + ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32; +#else page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32; +#endif page_mask_order = get_order(3 * page_mask_nelts * sizeof(long)); #endif -- cgit v1.2.3 From 6924dfb0588dab02a331a3a526f8a8fae7d1b3cc Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Mon, 7 Dec 2009 13:41:28 +0000 Subject: Blackfin: bf527-ezkit: add support for V2.1 boards Signed-off-by: Michael Hennerich Signed-off-by: Mike Frysinger --- arch/blackfin/Kconfig | 4 +- arch/blackfin/configs/BF527-EZKIT-V2_defconfig | 1811 ++++++++++++++++++++++++ arch/blackfin/mach-bf527/boards/Kconfig | 5 + arch/blackfin/mach-bf527/boards/Makefile | 1 + arch/blackfin/mach-bf527/boards/ezkit.c | 153 +- 5 files changed, 1951 insertions(+), 23 deletions(-) create mode 100644 arch/blackfin/configs/BF527-EZKIT-V2_defconfig diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 53c1e1d45c68..9409ae369517 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -349,7 +349,7 @@ config MEM_MT48LC8M32B2B5_7 config MEM_MT48LC32M16A2TG_75 bool - depends on (BFIN527_EZKIT || BFIN532_IP0X || BLACKSTAMP) + depends on (BFIN527_EZKIT || BFIN527_EZKIT_V2 || BFIN532_IP0X || BLACKSTAMP) default y config MEM_MT48LC32M8A2_75 @@ -448,7 +448,7 @@ config VCO_MULT range 1 64 default "22" if BFIN533_EZKIT default "45" if BFIN533_STAMP - default "20" if (BFIN537_STAMP || BFIN527_EZKIT || BFIN548_EZKIT || BFIN548_BLUETECHNIX_CM || BFIN538_EZKIT) + default "20" if (BFIN537_STAMP || BFIN527_EZKIT || BFIN527_EZKIT_V2 || BFIN548_EZKIT || BFIN548_BLUETECHNIX_CM || BFIN538_EZKIT) default "22" if BFIN533_BLUETECHNIX_CM default "20" if (BFIN537_BLUETECHNIX_CM_E || BFIN537_BLUETECHNIX_CM_U || BFIN527_BLUETECHNIX_CM || BFIN561_BLUETECHNIX_CM) default "20" if BFIN561_EZKIT diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig new file mode 100644 index 000000000000..d2dfcb0e7ce4 --- /dev/null +++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig @@ -0,0 +1,1811 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.32.2 +# +# CONFIG_MMU is not set +# CONFIG_FPU is not set +CONFIG_RWSEM_GENERIC_SPINLOCK=y +# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set +CONFIG_BLACKFIN=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_BUG=y +CONFIG_ZONE_DMA=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_GENERIC_GPIO=y +CONFIG_FORCE_MAX_ZONEORDER=14 +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y 
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set +# CONFIG_SYSFS_DEPRECATED_V2 is not set +# CONFIG_RELAY is not set +# CONFIG_NAMESPACES is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +# CONFIG_FUTEX is not set +CONFIG_EPOLL=y +# CONFIG_SIGNALFD is not set +# CONFIG_TIMERFD is not set +# CONFIG_EVENTFD is not set +# CONFIG_AIO is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +CONFIG_MMAP_ALLOW_UNINITIALIZED=y +# CONFIG_PROFILING is not set +CONFIG_HAVE_OPROFILE=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_SLOW_WORK is not set +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_SLABINFO=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +# CONFIG_LBDAF is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_IOSCHED_CFQ=y +CONFIG_DEFAULT_AS=y +# CONFIG_DEFAULT_DEADLINE is not set +# CONFIG_DEFAULT_CFQ is not set +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="anticipatory" +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +# CONFIG_FREEZER is not set + +# +# Blackfin Processor Options +# + +# +# Processor and Board Settings +# +# CONFIG_BF512 is not set +# CONFIG_BF514 is not set +# CONFIG_BF516 is not set +# CONFIG_BF518 is not set +# CONFIG_BF522 is not set +# CONFIG_BF523 is not set +# CONFIG_BF524 is not set +# CONFIG_BF525 is not set +# CONFIG_BF526 is not set +CONFIG_BF527=y +# CONFIG_BF531 is not set +# CONFIG_BF532 is not set +# CONFIG_BF533 is not set +# CONFIG_BF534 is not set +# CONFIG_BF536 is not set +# CONFIG_BF537 is not set +# CONFIG_BF538 is not set +# CONFIG_BF539 is not set +# CONFIG_BF542_std is not set +# CONFIG_BF542M is not set +# CONFIG_BF544_std is not set +# CONFIG_BF544M is not set +# CONFIG_BF547_std is 
not set +# CONFIG_BF547M is not set +# CONFIG_BF548_std is not set +# CONFIG_BF548M is not set +# CONFIG_BF549_std is not set +# CONFIG_BF549M is not set +# CONFIG_BF561 is not set +CONFIG_BF_REV_MIN=0 +CONFIG_BF_REV_MAX=2 +# CONFIG_BF_REV_0_0 is not set +# CONFIG_BF_REV_0_1 is not set +CONFIG_BF_REV_0_2=y +# CONFIG_BF_REV_0_3 is not set +# CONFIG_BF_REV_0_4 is not set +# CONFIG_BF_REV_0_5 is not set +# CONFIG_BF_REV_0_6 is not set +# CONFIG_BF_REV_ANY is not set +# CONFIG_BF_REV_NONE is not set +CONFIG_MEM_MT48LC32M16A2TG_75=y +CONFIG_IRQ_PLL_WAKEUP=7 +CONFIG_IRQ_DMA0_ERROR=7 +CONFIG_IRQ_DMAR0_BLK=7 +CONFIG_IRQ_DMAR1_BLK=7 +CONFIG_IRQ_DMAR0_OVR=7 +CONFIG_IRQ_DMAR1_OVR=7 +CONFIG_IRQ_PPI_ERROR=7 +CONFIG_IRQ_MAC_ERROR=7 +CONFIG_IRQ_SPORT0_ERROR=7 +CONFIG_IRQ_SPORT1_ERROR=7 +CONFIG_IRQ_UART0_ERROR=7 +CONFIG_IRQ_UART1_ERROR=7 +CONFIG_IRQ_RTC=8 +CONFIG_IRQ_PPI=8 +CONFIG_IRQ_SPORT0_RX=9 +CONFIG_IRQ_SPORT0_TX=9 +CONFIG_IRQ_SPORT1_RX=9 +CONFIG_IRQ_SPORT1_TX=9 +CONFIG_IRQ_TWI=10 +CONFIG_IRQ_UART0_RX=10 +CONFIG_IRQ_UART0_TX=10 +CONFIG_IRQ_UART1_RX=10 +CONFIG_IRQ_UART1_TX=10 +CONFIG_IRQ_OPTSEC=11 +CONFIG_IRQ_CNT=11 +CONFIG_IRQ_MAC_RX=11 +CONFIG_IRQ_PORTH_INTA=11 +CONFIG_IRQ_MAC_TX=11 +CONFIG_IRQ_PORTH_INTB=11 +CONFIG_IRQ_TIMER0=8 +CONFIG_IRQ_TIMER1=12 +CONFIG_IRQ_TIMER2=12 +CONFIG_IRQ_TIMER3=12 +CONFIG_IRQ_TIMER4=12 +CONFIG_IRQ_TIMER5=12 +CONFIG_IRQ_TIMER6=12 +CONFIG_IRQ_TIMER7=12 +CONFIG_IRQ_PORTG_INTA=12 +CONFIG_IRQ_PORTG_INTB=12 +CONFIG_IRQ_MEM_DMA0=13 +CONFIG_IRQ_MEM_DMA1=13 +CONFIG_IRQ_WATCH=13 +CONFIG_IRQ_PORTF_INTA=13 +CONFIG_IRQ_PORTF_INTB=13 +CONFIG_BF52x=y +# CONFIG_BFIN527_EZKIT is not set +CONFIG_BFIN527_EZKIT_V2=y +# CONFIG_BFIN527_BLUETECHNIX_CM is not set +# CONFIG_BFIN526_EZBRD is not set + +# +# BF527 Specific Configuration +# + +# +# Alternative Multiplexing Scheme +# +# CONFIG_BF527_SPORT0_PORTF is not set +CONFIG_BF527_SPORT0_PORTG=y +CONFIG_BF527_SPORT0_TSCLK_PG10=y +# CONFIG_BF527_SPORT0_TSCLK_PG14 is not set +CONFIG_BF527_UART1_PORTF=y +# CONFIG_BF527_UART1_PORTG is not set +# CONFIG_BF527_NAND_D_PORTF is not set +CONFIG_BF527_NAND_D_PORTH=y + +# +# Interrupt Priority Assignment +# + +# +# Priority +# +CONFIG_IRQ_SPI=10 +CONFIG_IRQ_SPI_ERROR=7 +CONFIG_IRQ_NFC_ERROR=7 +CONFIG_IRQ_HDMA_ERROR=7 +CONFIG_IRQ_HDMA=7 +CONFIG_IRQ_USB_EINT=10 +CONFIG_IRQ_USB_INT0=11 +CONFIG_IRQ_USB_INT1=11 +CONFIG_IRQ_USB_INT2=11 +CONFIG_IRQ_USB_DMA=11 + +# +# Board customizations +# +# CONFIG_CMDLINE_BOOL is not set +CONFIG_BOOT_LOAD=0x1000 + +# +# Clock/PLL Setup +# +CONFIG_CLKIN_HZ=25000000 +# CONFIG_BFIN_KERNEL_CLOCK is not set +CONFIG_MAX_VCO_HZ=600000000 +CONFIG_MIN_VCO_HZ=50000000 +CONFIG_MAX_SCLK_HZ=133333333 +CONFIG_MIN_SCLK_HZ=27000000 + +# +# Kernel Timer/Scheduler +# +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +# CONFIG_SCHED_HRTICK is not set +CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_CLOCKEVENTS=y +# CONFIG_TICKSOURCE_GPTMR0 is not set +CONFIG_TICKSOURCE_CORETMR=y +# CONFIG_CYCLES_CLOCKSOURCE is not set +# CONFIG_GPTMR0_CLOCKSOURCE is not set +# CONFIG_NO_HZ is not set +# CONFIG_HIGH_RES_TIMERS is not set +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y + +# +# Misc +# +CONFIG_BFIN_SCRATCH_REG_RETN=y +# CONFIG_BFIN_SCRATCH_REG_RETE is not set +# CONFIG_BFIN_SCRATCH_REG_CYCLES is not set + +# +# Blackfin Kernel Optimizations +# + +# +# Memory Optimizations +# +CONFIG_I_ENTRY_L1=y +CONFIG_EXCPT_IRQ_SYSC_L1=y +CONFIG_DO_IRQ_L1=y +CONFIG_CORE_TIMER_IRQ_L1=y +CONFIG_IDLE_L1=y +# CONFIG_SCHEDULE_L1 is not set +CONFIG_ARITHMETIC_OPS_L1=y 
+CONFIG_ACCESS_OK_L1=y +# CONFIG_MEMSET_L1 is not set +# CONFIG_MEMCPY_L1 is not set +# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set +# CONFIG_IP_CHECKSUM_L1 is not set +CONFIG_CACHELINE_ALIGNED_L1=y +# CONFIG_SYSCALL_TAB_L1 is not set +# CONFIG_CPLB_SWITCH_TAB_L1 is not set +CONFIG_APP_STACK_L1=y + +# +# Speed Optimizations +# +CONFIG_BFIN_INS_LOWOVERHEAD=y +CONFIG_RAMKERNEL=y +# CONFIG_ROMKERNEL is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=1 +CONFIG_VIRT_TO_BUS=y +CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 +CONFIG_BFIN_GPTIMERS=y +# CONFIG_DMA_UNCACHED_4M is not set +# CONFIG_DMA_UNCACHED_2M is not set +CONFIG_DMA_UNCACHED_1M=y +# CONFIG_DMA_UNCACHED_512K is not set +# CONFIG_DMA_UNCACHED_256K is not set +# CONFIG_DMA_UNCACHED_128K is not set +# CONFIG_DMA_UNCACHED_NONE is not set + +# +# Cache Support +# +CONFIG_BFIN_ICACHE=y +CONFIG_BFIN_EXTMEM_ICACHEABLE=y +CONFIG_BFIN_DCACHE=y +# CONFIG_BFIN_DCACHE_BANKA is not set +CONFIG_BFIN_EXTMEM_DCACHEABLE=y +CONFIG_BFIN_EXTMEM_WRITEBACK=y +# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set + +# +# Memory Protection Unit +# +# CONFIG_MPU is not set + +# +# Asynchronous Memory Configuration +# + +# +# EBIU_AMGCTL Global Control +# +CONFIG_C_AMCKEN=y +CONFIG_C_CDPRIO=y +# CONFIG_C_AMBEN is not set +# CONFIG_C_AMBEN_B0 is not set +# CONFIG_C_AMBEN_B0_B1 is not set +# CONFIG_C_AMBEN_B0_B1_B2 is not set +CONFIG_C_AMBEN_ALL=y + +# +# EBIU_AMBCTL Control +# +CONFIG_BANK_0=0x7BB0 +CONFIG_BANK_1=0x7BB0 +CONFIG_BANK_2=0x7BB0 +CONFIG_BANK_3=0x99B2 + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF_FDPIC=y +CONFIG_BINFMT_FLAT=y +CONFIG_BINFMT_ZFLAT=y +# CONFIG_BINFMT_SHARED_FLAT is not set +# CONFIG_HAVE_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +# CONFIG_PM is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +# CONFIG_INET_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +# CONFIG_IPV6 is not set +# CONFIG_NETLABEL is not set +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set 
+# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +CONFIG_IRDA=m + +# +# IrDA protocols +# +CONFIG_IRLAN=m +CONFIG_IRCOMM=m +# CONFIG_IRDA_ULTRA is not set + +# +# IrDA options +# +# CONFIG_IRDA_CACHE_LAST_LSAP is not set +# CONFIG_IRDA_FAST_RR is not set +# CONFIG_IRDA_DEBUG is not set + +# +# Infrared-port device drivers +# + +# +# SIR device drivers +# +CONFIG_IRTTY_SIR=m +CONFIG_BFIN_SIR=m +CONFIG_BFIN_SIR0=y +CONFIG_SIR_BFIN_DMA=y +# CONFIG_SIR_BFIN_PIO is not set + +# +# Dongle support +# +# CONFIG_DONGLE is not set +# CONFIG_KINGSUN_DONGLE is not set +# CONFIG_KSDAZZLE_DONGLE is not set +# CONFIG_KS959_DONGLE is not set + +# +# FIR device drivers +# +# CONFIG_USB_IRDA is not set +# CONFIG_SIGMATEL_FIR is not set +# CONFIG_MCS_FIR is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +# CONFIG_FW_LOADER is not set +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=m +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_CFI_INTELEXT is not set +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_RAM=y +CONFIG_MTD_ROM=m +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +CONFIG_MTD_COMPLEX_MAPPINGS=y +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_GPIO_ADDR is not set +# CONFIG_MTD_UCLINUX is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_DATAFLASH is not set +CONFIG_MTD_M25P80=y +CONFIG_M25PXX_USE_FAST_READ=y +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND=m +# CONFIG_MTD_NAND_VERIFY_WRITE is not set +# CONFIG_MTD_NAND_ECC_SMC 
is not set +# CONFIG_MTD_NAND_MUSEUM_IDS is not set +CONFIG_MTD_NAND_IDS=m +# CONFIG_MTD_NAND_BF5XX is not set +# CONFIG_MTD_NAND_DISKONCHIP is not set +# CONFIG_MTD_NAND_NANDSIM is not set +# CONFIG_MTD_NAND_PLATFORM is not set +# CONFIG_MTD_ALAUDA is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + +# +# UBI - Unsorted block images +# +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_UB is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_BLK_DEV_HD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_ISL29003 is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +# CONFIG_SCSI_TGT is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_SCSI_PROC_FS is not set + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=m +# CONFIG_BLK_DEV_SR_VENDOR is not set +# CONFIG_CHR_DEV_SG is not set +# CONFIG_CHR_DEV_SCH is not set +# CONFIG_SCSI_MULTI_LUN is not set +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set +# CONFIG_SCSI_SCAN_ASYNC is not set +CONFIG_SCSI_WAIT_SCAN=m + +# +# SCSI Transports +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_ISCSI_ATTRS is not set +# CONFIG_SCSI_SAS_LIBSAS is not set +# CONFIG_SCSI_SRP_ATTRS is not set +# CONFIG_SCSI_LOWLEVEL is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set +# CONFIG_ATA is not set +# CONFIG_MD is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_VETH is not set +CONFIG_PHYLIB=y + +# +# MII PHY device drivers +# +# CONFIG_MARVELL_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_ICPLUS_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_FIXED_PHY is not set +# CONFIG_MDIO_BITBANG is not set +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +CONFIG_BFIN_MAC=y +CONFIG_BFIN_MAC_USE_L1=y +CONFIG_BFIN_TX_DESC_NUM=10 +CONFIG_BFIN_RX_DESC_NUM=20 +CONFIG_BFIN_MAC_RMII=y +# CONFIG_SMC91X is not set +# CONFIG_DM9000 is not set +# CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_ADF702X is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# 
CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +# CONFIG_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# + +# +# USB Network Adapters +# +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_USBNET is not set +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_FF_MEMLESS=m +# CONFIG_INPUT_POLLDEV is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +CONFIG_KEYBOARD_ADP5520=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_QT2160 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +CONFIG_TOUCHSCREEN_AD7879_I2C=y +CONFIG_TOUCHSCREEN_AD7879=y +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_WM97XX is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_ATI_REMOTE is not set +# CONFIG_INPUT_ATI_REMOTE2 is not set +# CONFIG_INPUT_KEYSPAN_REMOTE is not set +# CONFIG_INPUT_POWERMATE is not set +# CONFIG_INPUT_YEALINK is not set +# CONFIG_INPUT_CM109 is not set +# CONFIG_INPUT_UINPUT is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_BFIN_ROTARY is not set +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_PCF8574 is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_BFIN_DMA_INTERFACE=m +# CONFIG_BFIN_PPI is not set +# CONFIG_BFIN_PPIFCD is not set +# CONFIG_BFIN_SIMPLE_TIMER is not set +# CONFIG_BFIN_SPI_ADC is not set +CONFIG_BFIN_SPORT=m +# CONFIG_BFIN_TWI_LCD is not set +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_VT_HW_CONSOLE_BINDING is not set +# CONFIG_DEVKMEM is not set +CONFIG_BFIN_JTAG_COMM=m +# 
CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +CONFIG_SERIAL_BFIN=y +CONFIG_SERIAL_BFIN_CONSOLE=y +CONFIG_SERIAL_BFIN_DMA=y +# CONFIG_SERIAL_BFIN_PIO is not set +# CONFIG_SERIAL_BFIN_UART0 is not set +CONFIG_SERIAL_BFIN_UART1=y +# CONFIG_BFIN_UART1_CTSRTS is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_BFIN_SPORT is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_BFIN_OTP=y +# CONFIG_BFIN_OTP_WRITE_ENABLE is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +CONFIG_I2C_BLACKFIN_TWI=y +CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_SIMTEC is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_STUB is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_DS1682 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +CONFIG_SPI_BFIN=y +# CONFIG_SPI_BFIN_LOCK is not set +# CONFIG_SPI_BFIN_SPORT is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set +CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y + +# +# Memory mapped GPIO expanders: +# + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_ADP5520 is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +CONFIG_WATCHDOG=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +CONFIG_BFIN_WDT=y + +# +# USB-based Watchdog Cards +# +# CONFIG_USBPCWATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_UCB1400_CORE is not set +# CONFIG_TPS65010 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_PMIC_DA903X is not set +CONFIG_PMIC_ADP5520=y +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE 
is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_REGULATOR is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_BFIN_T350MCQB is not set +CONFIG_FB_BFIN_LQ035Q1=y +# CONFIG_FB_BFIN_7393 is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +# CONFIG_LCD_PLATFORM is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=m +CONFIG_BACKLIGHT_GENERIC=m +# CONFIG_BACKLIGHT_ADP5520 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +# CONFIG_LOGO_LINUX_CLUT224 is not set +# CONFIG_LOGO_BLACKFIN_VGA16 is not set +CONFIG_LOGO_BLACKFIN_CLUT224=y +CONFIG_SOUND=m +# CONFIG_SOUND_OSS_CORE is not set +CONFIG_SND=m +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_JACK=y +# CONFIG_SND_SEQUENCER is not set +# CONFIG_SND_MIXER_OSS is not set +# CONFIG_SND_PCM_OSS is not set +# CONFIG_SND_DYNAMIC_MINORS is not set +CONFIG_SND_SUPPORT_OLD_API=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set +# CONFIG_SND_RAWMIDI_SEQ is not set +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set +CONFIG_SND_DRIVERS=y +# CONFIG_SND_DUMMY is not set +# CONFIG_SND_MTPAV is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set +CONFIG_SND_SPI=y + +# +# ALSA Blackfin devices +# +# CONFIG_SND_BFIN_AD73322 is not set +CONFIG_SND_USB=y +# CONFIG_SND_USB_AUDIO is not set +# CONFIG_SND_USB_CAIAQ is not set +CONFIG_SND_SOC=m +CONFIG_SND_SOC_AC97_BUS=y +CONFIG_SND_BF5XX_I2S=m +CONFIG_SND_BF5XX_SOC_SSM2602=m +# CONFIG_SND_BF5XX_SOC_AD73311 is not set +# CONFIG_SND_BF5XX_SOC_ADAU1371 is not set +# CONFIG_SND_BF5XX_SOC_ADAU1761 is not set +# CONFIG_SND_BF5XX_TDM is not set +CONFIG_SND_BF5XX_AC97=m +CONFIG_SND_BF5XX_MMAP_SUPPORT=y +# CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set +# CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set +CONFIG_SND_BF5XX_SOC_AD1980=m +CONFIG_SND_BF5XX_SOC_SPORT=m +CONFIG_SND_BF5XX_SOC_I2S=m +CONFIG_SND_BF5XX_SOC_AC97=m +CONFIG_SND_BF5XX_SPORT_NUM=0 
+CONFIG_SND_SOC_I2C_AND_SPI=m +# CONFIG_SND_SOC_ALL_CODECS is not set +CONFIG_SND_SOC_AD1980=m +CONFIG_SND_SOC_SSM2602=m +# CONFIG_SOUND_PRIME is not set +CONFIG_AC97_BUS=m +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +# CONFIG_HIDRAW is not set + +# +# USB Input Devices +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_CYPRESS=y +# CONFIG_HID_DRAGONRISE is not set +CONFIG_HID_EZKEY=y +# CONFIG_HID_KYE is not set +CONFIG_HID_GYRATION=y +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +CONFIG_HID_LOGITECH=y +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +# CONFIG_HID_NTRIG is not set +CONFIG_HID_PANTHERLORD=y +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PETALYNX=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SUNPLUS=y +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_ZEROPLUS is not set +CONFIG_USB_SUPPORT=y +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +# CONFIG_USB_ARCH_HAS_EHCI is not set +CONFIG_USB=y +# CONFIG_USB_DEBUG is not set +# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set + +# +# Miscellaneous USB options +# +CONFIG_USB_DEVICEFS=y +# CONFIG_USB_DEVICE_CLASS is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set +CONFIG_USB_OTG_BLACKLIST_HUB=y +CONFIG_USB_MON=y +# CONFIG_USB_WUSB is not set +# CONFIG_USB_WUSB_CBAF is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_ISP1760_HCD is not set +# CONFIG_USB_ISP1362_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HWA_HCD is not set +CONFIG_USB_MUSB_HDRC=y +CONFIG_USB_MUSB_SOC=y + +# +# Blackfin high speed USB Support +# +CONFIG_USB_MUSB_HOST=y +# CONFIG_USB_MUSB_PERIPHERAL is not set +# CONFIG_USB_MUSB_OTG is not set +CONFIG_USB_MUSB_HDRC_HCD=y +# CONFIG_MUSB_PIO_ONLY is not set +CONFIG_USB_INVENTRA_DMA=y +# CONFIG_USB_TI_CPPI_DMA is not set +# CONFIG_USB_MUSB_DEBUG is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_LIBUSUAL is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# 
CONFIG_USB_RIO500 is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_BERRY_CHARGE is not set +# CONFIG_USB_LED is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_USB_SISUSBVGA is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_VST is not set +# CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# +CONFIG_USB_OTG_UTILS=y +# CONFIG_USB_GPIO_VBUS is not set +CONFIG_NOP_USB_XCEIV=y +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_ADP5520=y + +# +# LED Triggers +# +# CONFIG_LEDS_TRIGGERS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_BFIN=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set + +# +# TI VLYNQ +# +# CONFIG_STAGING is not set + +# +# Firmware Drivers +# +# CONFIG_FIRMWARE_MEMMAP is not set +# CONFIG_SIGMA is not set + +# +# File systems +# +CONFIG_EXT2_FS=m +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_EXT4_FS is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_FS_POSIX_ACL is not set +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# 
CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +# CONFIG_ZISOFS is not set +CONFIG_UDF_FS=m +CONFIG_UDF_NLS=y + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_SYSFS=y +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_JFFS2_FS=m +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_FS_WBUF_VERIFY is not set +# CONFIG_JFFS2_SUMMARY is not set +# CONFIG_JFFS2_FS_XATTR is not set +# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set +CONFIG_JFFS2_ZLIB=y +# CONFIG_JFFS2_LZO is not set +CONFIG_JFFS2_RTIME=y +# CONFIG_JFFS2_RUBIN is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFSD is not set +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_RPCSEC_GSS_SPKM3 is not set +CONFIG_SMB_FS=m +# CONFIG_SMB_NLS_DEFAULT is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=m +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +CONFIG_NLS_CODEPAGE_936=m +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=m +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=m +# CONFIG_DLM is not set + +# +# Kernel hacking +# +# CONFIG_PRINTK_TIME is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y 
+CONFIG_FRAME_WARN=1024 +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +CONFIG_SCHED_DEBUG=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_TIMER_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_NOMMU_REGIONS is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +# CONFIG_FRAME_POINTER is not set +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_BRANCH_PROFILE_NONE is not set +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_DEBUG_STACKOVERFLOW is not set +# CONFIG_DEBUG_STACK_USAGE is not set +CONFIG_DEBUG_VERBOSE=y +CONFIG_DEBUG_MMRS=y +CONFIG_DEBUG_HWERR=y +CONFIG_EXACT_HWERR=y +CONFIG_DEBUG_DOUBLEFAULT=y +CONFIG_DEBUG_DOUBLEFAULT_PRINT=y +# CONFIG_DEBUG_DOUBLEFAULT_RESET is not set +# CONFIG_DEBUG_ICACHE_CHECK is not set +CONFIG_DEBUG_HUNT_FOR_ZERO=y +CONFIG_DEBUG_BFIN_HWTRACE_ON=y +# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF is not set +CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y +# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set +CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=1 +# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set +CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y +CONFIG_EARLY_PRINTK=y +CONFIG_CPLB_INFO=y +CONFIG_ACCESS_CHECK=y +# CONFIG_BFIN_ISRAM_SELF_TEST is not set + +# +# Security options +# +# CONFIG_KEYS is not set +CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set +# CONFIG_SECURITY_FILE_CAPABILITIES is not set +# CONFIG_SECURITY_ROOTPLUG is not set +# CONFIG_SECURITY_TOMOYO is not set +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_MANAGER is not set +# CONFIG_CRYPTO_MANAGER2 is not set +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_CRYPTD is not set +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes 
+# +# CONFIG_CRYPTO_CBC is not set +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +# CONFIG_CRYPTO_ECB is not set +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +# CONFIG_CRYPTO_HMAC is not set +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set +# CONFIG_CRYPTO_MD4 is not set +# CONFIG_CRYPTO_MD5 is not set +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +# CONFIG_CRYPTO_SHA1 is not set +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_ANUBIS is not set +# CONFIG_CRYPTO_ARC4 is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +# CONFIG_CRYPTO_DES is not set +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +# CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=m +# CONFIG_CRC16 is not set +# CONFIG_CRC_T10DIF is not set +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=m +CONFIG_DECOMPRESS_GZIP=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/blackfin/mach-bf527/boards/Kconfig b/arch/blackfin/mach-bf527/boards/Kconfig index df224d04e167..b14c28810a44 100644 --- a/arch/blackfin/mach-bf527/boards/Kconfig +++ b/arch/blackfin/mach-bf527/boards/Kconfig @@ -9,6 +9,11 @@ config BFIN527_EZKIT help BF527-EZKIT-LITE board support. +config BFIN527_EZKIT_V2 + bool "BF527-EZKIT-V2" + help + BF527-EZKIT-LITE V2.1+ board support. + config BFIN527_BLUETECHNIX_CM bool "Bluetechnix CM-BF527" help diff --git a/arch/blackfin/mach-bf527/boards/Makefile b/arch/blackfin/mach-bf527/boards/Makefile index eb6ed3362f9f..51a5817c4a90 100644 --- a/arch/blackfin/mach-bf527/boards/Makefile +++ b/arch/blackfin/mach-bf527/boards/Makefile @@ -3,5 +3,6 @@ # obj-$(CONFIG_BFIN527_EZKIT) += ezkit.o +obj-$(CONFIG_BFIN527_EZKIT_V2) += ezkit.o obj-$(CONFIG_BFIN527_BLUETECHNIX_CM) += cm_bf527.o obj-$(CONFIG_BFIN526_EZBRD) += ezbrd.o diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index 6138b47fbbaf..9db506bdf4f4 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include #include @@ -29,7 +31,11 @@ /* * Name the Board for the /proc/cpuinfo */ +#ifdef CONFIG_BFIN527_EZKIT_V2 +const char bfin_board_name[] = "ADI BF527-EZKIT V2"; +#else const char bfin_board_name[] = "ADI BF527-EZKIT"; +#endif /* * Driver needs to know address, irq and flag pin. 
@@ -143,6 +149,32 @@ static struct platform_device bf52x_t350mcqb_device = { }; #endif +#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) +#include + +static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { + .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, +}; + +static struct resource bfin_lq035q1_resources[] = { + { + .start = IRQ_PPI_ERROR, + .end = IRQ_PPI_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_lq035q1_device = { + .name = "bfin-lq035q1", + .id = -1, + .num_resources = ARRAY_SIZE(bfin_lq035q1_resources), + .resource = bfin_lq035q1_resources, + .dev = { + .platform_data = &bfin_lq035q1_data, + }, +}; +#endif + #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static struct mtd_partition ezkit_partitions[] = { { @@ -500,14 +532,6 @@ static struct bfin5xx_spi_chip spi_ad7879_chip_info = { }; #endif -#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \ - && defined(CONFIG_SND_SOC_WM8731_SPI) -static struct bfin5xx_spi_chip spi_wm8731_chip_info = { - .enable_dma = 0, - .bits_per_word = 16, -}; -#endif - #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) static struct bfin5xx_spi_chip spidev_chip_info = { .enable_dma = 0, @@ -515,6 +539,13 @@ static struct bfin5xx_spi_chip spidev_chip_info = { }; #endif +#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) +static struct bfin5xx_spi_chip lq035q1_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 8, +}; +#endif + static struct spi_board_info bfin_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) @@ -586,17 +617,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { .mode = SPI_CPHA | SPI_CPOL, }, #endif -#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \ - && defined(CONFIG_SND_SOC_WM8731_SPI) - { - .modalias = "wm8731", - .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ - .bus_num = 0, - .chip_select = 5, - .controller_data = &spi_wm8731_chip_info, - .mode = SPI_MODE_0, - }, -#endif #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) { .modalias = "spidev", @@ -606,6 +626,16 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { .controller_data = &spidev_chip_info, }, #endif +#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) + { + .modalias = "bfin-lq035q1-spi", + .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 0, + .chip_select = 7, + .controller_data = &lq035q1_spi_chip_info, + .mode = SPI_CPHA | SPI_CPOL, + }, +#endif }; #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) @@ -824,6 +854,71 @@ static struct platform_device i2c_bfin_twi_device = { }; #endif +#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE) +#include + + /* + * ADP5520/5501 LEDs Data + */ + +static struct led_info adp5520_leds[] = { + { + .name = "adp5520-led1", + .default_trigger = "none", + .flags = FLAG_ID_ADP5520_LED1_ADP5501_LED0 | ADP5520_LED_OFFT_600ms, + }, +}; + +static struct adp5520_leds_platform_data adp5520_leds_data = { + .num_leds = ARRAY_SIZE(adp5520_leds), + .leds = adp5520_leds, + .fade_in = ADP5520_FADE_T_600ms, + .fade_out = ADP5520_FADE_T_600ms, + .led_on_time = ADP5520_LED_ONT_600ms, +}; + + /* + * ADP5520 Keypad Data + */ + +static const unsigned short adp5520_keymap[ADP5520_KEYMAPSIZE] = { + [ADP5520_KEY(3, 3)] = KEY_1, + [ADP5520_KEY(2, 3)] = KEY_2, + 
[ADP5520_KEY(1, 3)] = KEY_3, + [ADP5520_KEY(0, 3)] = KEY_UP, + [ADP5520_KEY(3, 2)] = KEY_4, + [ADP5520_KEY(2, 2)] = KEY_5, + [ADP5520_KEY(1, 2)] = KEY_6, + [ADP5520_KEY(0, 2)] = KEY_DOWN, + [ADP5520_KEY(3, 1)] = KEY_7, + [ADP5520_KEY(2, 1)] = KEY_8, + [ADP5520_KEY(1, 1)] = KEY_9, + [ADP5520_KEY(0, 1)] = KEY_DOT, + [ADP5520_KEY(3, 0)] = KEY_BACKSPACE, + [ADP5520_KEY(2, 0)] = KEY_0, + [ADP5520_KEY(1, 0)] = KEY_HELP, + [ADP5520_KEY(0, 0)] = KEY_ENTER, +}; + +static struct adp5520_keys_platform_data adp5520_keys_data = { + .rows_en_mask = ADP5520_ROW_R3 | ADP5520_ROW_R2 | ADP5520_ROW_R1 | ADP5520_ROW_R0, + .cols_en_mask = ADP5520_COL_C3 | ADP5520_COL_C2 | ADP5520_COL_C1 | ADP5520_COL_C0, + .keymap = adp5520_keymap, + .keymapsize = ARRAY_SIZE(adp5520_keymap), + .repeat = 0, +}; + + /* + * ADP5520/5501 Multifuction Device Init Data + */ + +static struct adp5520_platform_data adp5520_pdev_data = { + .leds = &adp5520_leds_data, + .keys = &adp5520_keys_data, +}; + +#endif + static struct i2c_board_info __initdata bfin_i2c_board_info[] = { #if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) { @@ -841,6 +936,20 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { I2C_BOARD_INFO("bfin-adv7393", 0x2B), }, #endif +#if defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE) + { + I2C_BOARD_INFO("ad7879", 0x2C), + .irq = IRQ_PF8, + .platform_data = (void *)&bfin_ad7879_ts_info, + }, +#endif +#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE) + { + I2C_BOARD_INFO("pmic-adp5520", 0x32), + .irq = IRQ_PF9, + .platform_data = (void *)&adp5520_pdev_data, + }, +#endif }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) @@ -915,7 +1024,6 @@ static struct platform_device bfin_sport1_uart_device = { #endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) -#include #include static struct gpio_keys_button bfin_gpio_keys_table[] = { @@ -937,7 +1045,6 @@ static struct platform_device bfin_device_gpiokeys = { #endif #if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE) -#include #include static struct bfin_rotary_platform_data bfin_rotary_data = { @@ -1043,6 +1150,10 @@ static struct platform_device *stamp_devices[] __initdata = { &bf52x_t350mcqb_device, #endif +#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) + &bfin_lq035q1_device, +#endif + #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, -- cgit v1.2.3 From d94a1aa44ed6bfe8d8ab36c02de652d4fcf0d2c3 Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Tue, 8 Dec 2009 11:45:55 +0000 Subject: Blackfin: extend bfin-lq035q1-fb resources to include PPI mode This lets us support the new BF527-EZKIT V2.1 via platform resources tweaks only. 
Signed-off-by: Michael Hennerich Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/bfin-lq035q1.h | 12 ++++++++++++ arch/blackfin/mach-bf527/boards/ezbrd.c | 7 ++++--- arch/blackfin/mach-bf527/boards/ezkit.c | 1 + arch/blackfin/mach-bf537/boards/stamp.c | 11 ++++++----- arch/blackfin/mach-bf538/boards/ezkit.c | 7 ++++--- 5 files changed, 27 insertions(+), 11 deletions(-) diff --git a/arch/blackfin/include/asm/bfin-lq035q1.h b/arch/blackfin/include/asm/bfin-lq035q1.h index 57bc21ac2296..836895156b5b 100644 --- a/arch/blackfin/include/asm/bfin-lq035q1.h +++ b/arch/blackfin/include/asm/bfin-lq035q1.h @@ -8,6 +8,9 @@ #ifndef BFIN_LQ035Q1_H #define BFIN_LQ035Q1_H +/* + * LCD Modes + */ #define LQ035_RL (0 << 8) /* Right -> Left Scan */ #define LQ035_LR (1 << 8) /* Left -> Right Scan */ #define LQ035_TB (1 << 9) /* Top -> Botton Scan */ @@ -17,9 +20,18 @@ #define LQ035_NORM (1 << 13) /* Reversal */ #define LQ035_REV (0 << 13) /* Reversal */ +/* + * PPI Modes + */ + +#define USE_RGB565_16_BIT_PPI 1 +#define USE_RGB565_8_BIT_PPI 2 +#define USE_RGB888_8_BIT_PPI 3 + struct bfin_lq035q1fb_disp_info { unsigned mode; + unsigned ppi_mode; /* GPIOs */ int use_bl; unsigned gpio_bl; diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c index 3ff61e6fbe95..faede2b964ba 100644 --- a/arch/blackfin/mach-bf527/boards/ezbrd.c +++ b/arch/blackfin/mach-bf527/boards/ezbrd.c @@ -749,9 +749,10 @@ static struct platform_device bfin_dpmc = { #include static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { - .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, - .use_bl = 1, - .gpio_bl = GPIO_PG12, + .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, + .ppi_mode = USE_RGB565_16_BIT_PPI, + .use_bl = 1, + .gpio_bl = GPIO_PG12, }; static struct resource bfin_lq035q1_resources[] = { diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index 9db506bdf4f4..fdfe8cae49bc 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -154,6 +154,7 @@ static struct platform_device bf52x_t350mcqb_device = { static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, + .ppi_mode = USE_RGB565_8_BIT_PPI, }; static struct resource bfin_lq035q1_resources[] = { diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 3cb20d7efbdb..538bff83e607 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -1131,9 +1131,10 @@ static struct platform_device bfin_fb_device = { #include static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { - .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, - .use_bl = 0, /* let something else control the LCD Blacklight */ - .gpio_bl = GPIO_PF7, + .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, + .ppi_mode = USE_RGB565_16_BIT_PPI, + .use_bl = 0, /* let something else control the LCD Blacklight */ + .gpio_bl = GPIO_PF7, }; static struct resource bfin_lq035q1_resources[] = { @@ -1147,8 +1148,8 @@ static struct resource bfin_lq035q1_resources[] = { static struct platform_device bfin_lq035q1_device = { .name = "bfin-lq035q1", .id = -1, - .num_resources = ARRAY_SIZE(bfin_lq035q1_resources), - .resource = bfin_lq035q1_resources, + .num_resources = ARRAY_SIZE(bfin_lq035q1_resources), + .resource = bfin_lq035q1_resources, .dev = { .platform_data = &bfin_lq035q1_data, }, diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c 
b/arch/blackfin/mach-bf538/boards/ezkit.c index 20387fe09c61..d2d06f046ad0 100644 --- a/arch/blackfin/mach-bf538/boards/ezkit.c +++ b/arch/blackfin/mach-bf538/boards/ezkit.c @@ -534,9 +534,10 @@ static struct bfin5xx_spi_chip spi_ad7879_chip_info = { #include static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { - .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, - .use_bl = 0, /* let something else control the LCD Blacklight */ - .gpio_bl = GPIO_PF7, + .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, + .ppi_mode = USE_RGB565_16_BIT_PPI, + .use_bl = 0, /* let something else control the LCD Blacklight */ + .gpio_bl = GPIO_PF7, }; static struct resource bfin_lq035q1_resources[] = { -- cgit v1.2.3 From 5792ab2a0a22fdaef33056ca2b31847a28b1af60 Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Wed, 9 Dec 2009 07:01:50 +0000 Subject: Blackfin: MPU: handle caches for reserved memory We weren't handling the user-specified cache behavior for the reserved memory regions (via mem=/max_mem=). The no-MPU code already takes care of this, so add support to the MPU code as well. Signed-off-by: Sonic Zhang Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/cplb-mpu/cplbmgr.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c index d4cc53a0ef89..7e6383dc7b20 100644 --- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c +++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c @@ -131,7 +131,9 @@ static noinline int dcplb_miss(unsigned int cpu) } else return CPLB_PROT_VIOL; } else if (addr >= _ramend) { - d_data |= CPLB_USER_RD | CPLB_USER_WR; + d_data |= CPLB_USER_RD | CPLB_USER_WR; + if (reserved_mem_dcache_on) + d_data |= CPLB_L1_CHBL; } else { mask = current_rwx_mask[cpu]; if (mask) { @@ -231,6 +233,8 @@ static noinline int icplb_miss(unsigned int cpu) return CPLB_PROT_VIOL; } else if (addr >= _ramend) { i_data |= CPLB_USER_RD; + if (reserved_mem_icache_on) + i_data |= CPLB_L1_CHBL; } else { /* * Two cases to distinguish - a supervisor access must -- cgit v1.2.3 From ead9b1156d4b128db9f13ee8be32cbcd3f255db9 Mon Sep 17 00:00:00 2001 From: Graf Yang Date: Mon, 14 Dec 2009 08:01:08 +0000 Subject: Blackfin: SMP: enable HAVE_LEGACY_PER_CPU_AREA The current Blackfin SMP code relies on the legacy cpu area code, so select it until we port things to the newer code. Signed-off-by: Graf Yang Signed-off-by: Mike Frysinger --- arch/blackfin/Kconfig | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 9409ae369517..1f075be7717f 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -258,6 +258,10 @@ config IRQ_PER_CPU depends on SMP default y +config HAVE_LEGACY_PER_CPU_AREA + def_bool y + depends on SMP + config BF_REV_MIN int default 0 if (BF51x || BF52x || (BF54x && !BF54xM)) -- cgit v1.2.3 From 360adee8a5c249f221de43d7d46073391359a4ba Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Tue, 15 Dec 2009 05:08:36 +0000 Subject: Blackfin: wire up the various memory related syscalls These all just go to the stub syscall at the moment, so this is largely future proofing. 
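A minimal userspace sketch of what this wiring enables (illustrative only; how much each call actually does on no-MMU Blackfin still depends on the generic mm code these table slots now point at):

	#include <sys/mman.h>

	int main(void)
	{
		static char buf[4096];

		/* These slots used to be _sys_ni_syscall, so the calls failed
		 * with -ENOSYS; with this patch they reach the generic
		 * sys_mlock()/sys_munlock() entries instead. */
		mlock(buf, sizeof(buf));
		munlock(buf, sizeof(buf));
		return 0;
	}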
Signed-off-by: Mike Frysinger --- arch/blackfin/mach-common/entry.S | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index 01b2f58dfb95..0df7ef200f9d 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S @@ -1357,7 +1357,7 @@ ENTRY(_sys_call_table) .long _sys_newuname .long _sys_ni_syscall /* old sys_modify_ldt */ .long _sys_adjtimex - .long _sys_ni_syscall /* 125 */ /* sys_mprotect */ + .long _sys_mprotect /* 125 */ .long _sys_ni_syscall /* old sys_sigprocmask */ .long _sys_ni_syscall /* old "creat_module" */ .long _sys_init_module @@ -1376,16 +1376,16 @@ ENTRY(_sys_call_table) .long _sys_getdents .long _sys_ni_syscall /* sys_select */ .long _sys_flock - .long _sys_ni_syscall /* sys_msync */ + .long _sys_msync .long _sys_readv /* 145 */ .long _sys_writev .long _sys_getsid .long _sys_fdatasync .long _sys_sysctl - .long _sys_ni_syscall /* 150 */ /* sys_mlock */ - .long _sys_ni_syscall /* sys_munlock */ - .long _sys_ni_syscall /* sys_mlockall */ - .long _sys_ni_syscall /* sys_munlockall */ + .long _sys_mlock /* 150 */ + .long _sys_munlock + .long _sys_mlockall + .long _sys_munlockall .long _sys_sched_setparam .long _sys_sched_getparam /* 155 */ .long _sys_sched_setscheduler @@ -1450,8 +1450,8 @@ ENTRY(_sys_call_table) .long _sys_setfsuid /* 215 */ .long _sys_setfsgid .long _sys_pivot_root - .long _sys_ni_syscall /* sys_mincore */ - .long _sys_ni_syscall /* sys_madvise */ + .long _sys_mincore + .long _sys_madvise .long _sys_getdents64 /* 220 */ .long _sys_fcntl64 .long _sys_ni_syscall /* reserved for TUX */ @@ -1507,7 +1507,7 @@ ENTRY(_sys_call_table) .long _sys_utimes .long _sys_fadvise64_64 .long _sys_ni_syscall /* vserver */ - .long _sys_ni_syscall /* 275, mbind */ + .long _sys_mbind /* 275 */ .long _sys_ni_syscall /* get_mempolicy */ .long _sys_ni_syscall /* set_mempolicy */ .long _sys_mq_open -- cgit v1.2.3 From 0e64268ced7975ddcfed3e47a26b8df534f8e3ed Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Tue, 15 Dec 2009 13:33:19 +0000 Subject: Blackfin: merge common CAN defines into one headers Rather than copy and paste the MMR defines and register layout, consolidate everything in one place. Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/bfin_can.h | 725 +++++++++++++++++++++++++++++++++++ 1 file changed, 725 insertions(+) create mode 100644 arch/blackfin/include/asm/bfin_can.h diff --git a/arch/blackfin/include/asm/bfin_can.h b/arch/blackfin/include/asm/bfin_can.h new file mode 100644 index 000000000000..eec0076a385b --- /dev/null +++ b/arch/blackfin/include/asm/bfin_can.h @@ -0,0 +1,725 @@ +/* + * bfin_can.h - interface to Blackfin CANs + * + * Copyright 2004-2009 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __ASM_BFIN_CAN_H__ +#define __ASM_BFIN_CAN_H__ + +/* + * transmit and receive channels + */ +#define TRANSMIT_CHL 24 +#define RECEIVE_STD_CHL 0 +#define RECEIVE_EXT_CHL 4 +#define RECEIVE_RTR_CHL 8 +#define RECEIVE_EXT_RTR_CHL 12 +#define MAX_CHL_NUMBER 32 + +/* + * All Blackfin system MMRs are padded to 32bits even if the register + * itself is only 16bits. So use a helper macro to streamline this. 
+ */ +#define __BFP(m) u16 m; u16 __pad_##m + +/* + * bfin can registers layout + */ +struct bfin_can_mask_regs { + __BFP(aml); + __BFP(amh); +}; + +struct bfin_can_channel_regs { + u16 data[8]; + __BFP(dlc); + __BFP(tsv); + __BFP(id0); + __BFP(id1); +}; + +struct bfin_can_regs { + /* + * global control and status registers + */ + __BFP(mc1); /* offset 0x00 */ + __BFP(md1); /* offset 0x04 */ + __BFP(trs1); /* offset 0x08 */ + __BFP(trr1); /* offset 0x0c */ + __BFP(ta1); /* offset 0x10 */ + __BFP(aa1); /* offset 0x14 */ + __BFP(rmp1); /* offset 0x18 */ + __BFP(rml1); /* offset 0x1c */ + __BFP(mbtif1); /* offset 0x20 */ + __BFP(mbrif1); /* offset 0x24 */ + __BFP(mbim1); /* offset 0x28 */ + __BFP(rfh1); /* offset 0x2c */ + __BFP(opss1); /* offset 0x30 */ + u32 __pad1[3]; + __BFP(mc2); /* offset 0x40 */ + __BFP(md2); /* offset 0x44 */ + __BFP(trs2); /* offset 0x48 */ + __BFP(trr2); /* offset 0x4c */ + __BFP(ta2); /* offset 0x50 */ + __BFP(aa2); /* offset 0x54 */ + __BFP(rmp2); /* offset 0x58 */ + __BFP(rml2); /* offset 0x5c */ + __BFP(mbtif2); /* offset 0x60 */ + __BFP(mbrif2); /* offset 0x64 */ + __BFP(mbim2); /* offset 0x68 */ + __BFP(rfh2); /* offset 0x6c */ + __BFP(opss2); /* offset 0x70 */ + u32 __pad2[3]; + __BFP(clock); /* offset 0x80 */ + __BFP(timing); /* offset 0x84 */ + __BFP(debug); /* offset 0x88 */ + __BFP(status); /* offset 0x8c */ + __BFP(cec); /* offset 0x90 */ + __BFP(gis); /* offset 0x94 */ + __BFP(gim); /* offset 0x98 */ + __BFP(gif); /* offset 0x9c */ + __BFP(control); /* offset 0xa0 */ + __BFP(intr); /* offset 0xa4 */ + u32 __pad3[1]; + __BFP(mbtd); /* offset 0xac */ + __BFP(ewr); /* offset 0xb0 */ + __BFP(esr); /* offset 0xb4 */ + u32 __pad4[2]; + __BFP(ucreg); /* offset 0xc0 */ + __BFP(uccnt); /* offset 0xc4 */ + __BFP(ucrc); /* offset 0xc8 */ + __BFP(uccnf); /* offset 0xcc */ + u32 __pad5[12]; + + /* + * channel(mailbox) mask and message registers + */ + struct bfin_can_mask_regs msk[MAX_CHL_NUMBER]; /* offset 0x100 */ + struct bfin_can_channel_regs chl[MAX_CHL_NUMBER]; /* offset 0x200 */ +}; + +#undef __BFP + +/* CAN_CONTROL Masks */ +#define SRS 0x0001 /* Software Reset */ +#define DNM 0x0002 /* Device Net Mode */ +#define ABO 0x0004 /* Auto-Bus On Enable */ +#define TXPRIO 0x0008 /* TX Priority (Priority/Mailbox*) */ +#define WBA 0x0010 /* Wake-Up On CAN Bus Activity Enable */ +#define SMR 0x0020 /* Sleep Mode Request */ +#define CSR 0x0040 /* CAN Suspend Mode Request */ +#define CCR 0x0080 /* CAN Configuration Mode Request */ + +/* CAN_STATUS Masks */ +#define WT 0x0001 /* TX Warning Flag */ +#define WR 0x0002 /* RX Warning Flag */ +#define EP 0x0004 /* Error Passive Mode */ +#define EBO 0x0008 /* Error Bus Off Mode */ +#define SMA 0x0020 /* Sleep Mode Acknowledge */ +#define CSA 0x0040 /* Suspend Mode Acknowledge */ +#define CCA 0x0080 /* Configuration Mode Acknowledge */ +#define MBPTR 0x1F00 /* Mailbox Pointer */ +#define TRM 0x4000 /* Transmit Mode */ +#define REC 0x8000 /* Receive Mode */ + +/* CAN_CLOCK Masks */ +#define BRP 0x03FF /* Bit-Rate Pre-Scaler */ + +/* CAN_TIMING Masks */ +#define TSEG1 0x000F /* Time Segment 1 */ +#define TSEG2 0x0070 /* Time Segment 2 */ +#define SAM 0x0080 /* Sampling */ +#define SJW 0x0300 /* Synchronization Jump Width */ + +/* CAN_DEBUG Masks */ +#define DEC 0x0001 /* Disable CAN Error Counters */ +#define DRI 0x0002 /* Disable CAN RX Input */ +#define DTO 0x0004 /* Disable CAN TX Output */ +#define DIL 0x0008 /* Disable CAN Internal Loop */ +#define MAA 0x0010 /* Mode Auto-Acknowledge Enable */ +#define MRB 0x0020 /* Mode 
Read Back Enable */ +#define CDE 0x8000 /* CAN Debug Enable */ + +/* CAN_CEC Masks */ +#define RXECNT 0x00FF /* Receive Error Counter */ +#define TXECNT 0xFF00 /* Transmit Error Counter */ + +/* CAN_INTR Masks */ +#define MBRIRQ 0x0001 /* Mailbox Receive Interrupt */ +#define MBTIRQ 0x0002 /* Mailbox Transmit Interrupt */ +#define GIRQ 0x0004 /* Global Interrupt */ +#define SMACK 0x0008 /* Sleep Mode Acknowledge */ +#define CANTX 0x0040 /* CAN TX Bus Value */ +#define CANRX 0x0080 /* CAN RX Bus Value */ + +/* CAN_MBxx_ID1 and CAN_MBxx_ID0 Masks */ +#define DFC 0xFFFF /* Data Filtering Code (If Enabled) (ID0) */ +#define EXTID_LO 0xFFFF /* Lower 16 Bits of Extended Identifier (ID0) */ +#define EXTID_HI 0x0003 /* Upper 2 Bits of Extended Identifier (ID1) */ +#define BASEID 0x1FFC /* Base Identifier */ +#define IDE 0x2000 /* Identifier Extension */ +#define RTR 0x4000 /* Remote Frame Transmission Request */ +#define AME 0x8000 /* Acceptance Mask Enable */ + +/* CAN_MBxx_TIMESTAMP Masks */ +#define TSV 0xFFFF /* Timestamp */ + +/* CAN_MBxx_LENGTH Masks */ +#define DLC 0x000F /* Data Length Code */ + +/* CAN_AMxxH and CAN_AMxxL Masks */ +#define DFM 0xFFFF /* Data Field Mask (If Enabled) (CAN_AMxxL) */ +#define EXTID_LO 0xFFFF /* Lower 16 Bits of Extended Identifier (CAN_AMxxL) */ +#define EXTID_HI 0x0003 /* Upper 2 Bits of Extended Identifier (CAN_AMxxH) */ +#define BASEID 0x1FFC /* Base Identifier */ +#define AMIDE 0x2000 /* Acceptance Mask ID Extension Enable */ +#define FMD 0x4000 /* Full Mask Data Field Enable */ +#define FDF 0x8000 /* Filter On Data Field Enable */ + +/* CAN_MC1 Masks */ +#define MC0 0x0001 /* Enable Mailbox 0 */ +#define MC1 0x0002 /* Enable Mailbox 1 */ +#define MC2 0x0004 /* Enable Mailbox 2 */ +#define MC3 0x0008 /* Enable Mailbox 3 */ +#define MC4 0x0010 /* Enable Mailbox 4 */ +#define MC5 0x0020 /* Enable Mailbox 5 */ +#define MC6 0x0040 /* Enable Mailbox 6 */ +#define MC7 0x0080 /* Enable Mailbox 7 */ +#define MC8 0x0100 /* Enable Mailbox 8 */ +#define MC9 0x0200 /* Enable Mailbox 9 */ +#define MC10 0x0400 /* Enable Mailbox 10 */ +#define MC11 0x0800 /* Enable Mailbox 11 */ +#define MC12 0x1000 /* Enable Mailbox 12 */ +#define MC13 0x2000 /* Enable Mailbox 13 */ +#define MC14 0x4000 /* Enable Mailbox 14 */ +#define MC15 0x8000 /* Enable Mailbox 15 */ + +/* CAN_MC2 Masks */ +#define MC16 0x0001 /* Enable Mailbox 16 */ +#define MC17 0x0002 /* Enable Mailbox 17 */ +#define MC18 0x0004 /* Enable Mailbox 18 */ +#define MC19 0x0008 /* Enable Mailbox 19 */ +#define MC20 0x0010 /* Enable Mailbox 20 */ +#define MC21 0x0020 /* Enable Mailbox 21 */ +#define MC22 0x0040 /* Enable Mailbox 22 */ +#define MC23 0x0080 /* Enable Mailbox 23 */ +#define MC24 0x0100 /* Enable Mailbox 24 */ +#define MC25 0x0200 /* Enable Mailbox 25 */ +#define MC26 0x0400 /* Enable Mailbox 26 */ +#define MC27 0x0800 /* Enable Mailbox 27 */ +#define MC28 0x1000 /* Enable Mailbox 28 */ +#define MC29 0x2000 /* Enable Mailbox 29 */ +#define MC30 0x4000 /* Enable Mailbox 30 */ +#define MC31 0x8000 /* Enable Mailbox 31 */ + +/* CAN_MD1 Masks */ +#define MD0 0x0001 /* Enable Mailbox 0 For Receive */ +#define MD1 0x0002 /* Enable Mailbox 1 For Receive */ +#define MD2 0x0004 /* Enable Mailbox 2 For Receive */ +#define MD3 0x0008 /* Enable Mailbox 3 For Receive */ +#define MD4 0x0010 /* Enable Mailbox 4 For Receive */ +#define MD5 0x0020 /* Enable Mailbox 5 For Receive */ +#define MD6 0x0040 /* Enable Mailbox 6 For Receive */ +#define MD7 0x0080 /* Enable Mailbox 7 For Receive */ +#define MD8 0x0100 /* Enable 
Mailbox 8 For Receive */ +#define MD9 0x0200 /* Enable Mailbox 9 For Receive */ +#define MD10 0x0400 /* Enable Mailbox 10 For Receive */ +#define MD11 0x0800 /* Enable Mailbox 11 For Receive */ +#define MD12 0x1000 /* Enable Mailbox 12 For Receive */ +#define MD13 0x2000 /* Enable Mailbox 13 For Receive */ +#define MD14 0x4000 /* Enable Mailbox 14 For Receive */ +#define MD15 0x8000 /* Enable Mailbox 15 For Receive */ + +/* CAN_MD2 Masks */ +#define MD16 0x0001 /* Enable Mailbox 16 For Receive */ +#define MD17 0x0002 /* Enable Mailbox 17 For Receive */ +#define MD18 0x0004 /* Enable Mailbox 18 For Receive */ +#define MD19 0x0008 /* Enable Mailbox 19 For Receive */ +#define MD20 0x0010 /* Enable Mailbox 20 For Receive */ +#define MD21 0x0020 /* Enable Mailbox 21 For Receive */ +#define MD22 0x0040 /* Enable Mailbox 22 For Receive */ +#define MD23 0x0080 /* Enable Mailbox 23 For Receive */ +#define MD24 0x0100 /* Enable Mailbox 24 For Receive */ +#define MD25 0x0200 /* Enable Mailbox 25 For Receive */ +#define MD26 0x0400 /* Enable Mailbox 26 For Receive */ +#define MD27 0x0800 /* Enable Mailbox 27 For Receive */ +#define MD28 0x1000 /* Enable Mailbox 28 For Receive */ +#define MD29 0x2000 /* Enable Mailbox 29 For Receive */ +#define MD30 0x4000 /* Enable Mailbox 30 For Receive */ +#define MD31 0x8000 /* Enable Mailbox 31 For Receive */ + +/* CAN_RMP1 Masks */ +#define RMP0 0x0001 /* RX Message Pending In Mailbox 0 */ +#define RMP1 0x0002 /* RX Message Pending In Mailbox 1 */ +#define RMP2 0x0004 /* RX Message Pending In Mailbox 2 */ +#define RMP3 0x0008 /* RX Message Pending In Mailbox 3 */ +#define RMP4 0x0010 /* RX Message Pending In Mailbox 4 */ +#define RMP5 0x0020 /* RX Message Pending In Mailbox 5 */ +#define RMP6 0x0040 /* RX Message Pending In Mailbox 6 */ +#define RMP7 0x0080 /* RX Message Pending In Mailbox 7 */ +#define RMP8 0x0100 /* RX Message Pending In Mailbox 8 */ +#define RMP9 0x0200 /* RX Message Pending In Mailbox 9 */ +#define RMP10 0x0400 /* RX Message Pending In Mailbox 10 */ +#define RMP11 0x0800 /* RX Message Pending In Mailbox 11 */ +#define RMP12 0x1000 /* RX Message Pending In Mailbox 12 */ +#define RMP13 0x2000 /* RX Message Pending In Mailbox 13 */ +#define RMP14 0x4000 /* RX Message Pending In Mailbox 14 */ +#define RMP15 0x8000 /* RX Message Pending In Mailbox 15 */ + +/* CAN_RMP2 Masks */ +#define RMP16 0x0001 /* RX Message Pending In Mailbox 16 */ +#define RMP17 0x0002 /* RX Message Pending In Mailbox 17 */ +#define RMP18 0x0004 /* RX Message Pending In Mailbox 18 */ +#define RMP19 0x0008 /* RX Message Pending In Mailbox 19 */ +#define RMP20 0x0010 /* RX Message Pending In Mailbox 20 */ +#define RMP21 0x0020 /* RX Message Pending In Mailbox 21 */ +#define RMP22 0x0040 /* RX Message Pending In Mailbox 22 */ +#define RMP23 0x0080 /* RX Message Pending In Mailbox 23 */ +#define RMP24 0x0100 /* RX Message Pending In Mailbox 24 */ +#define RMP25 0x0200 /* RX Message Pending In Mailbox 25 */ +#define RMP26 0x0400 /* RX Message Pending In Mailbox 26 */ +#define RMP27 0x0800 /* RX Message Pending In Mailbox 27 */ +#define RMP28 0x1000 /* RX Message Pending In Mailbox 28 */ +#define RMP29 0x2000 /* RX Message Pending In Mailbox 29 */ +#define RMP30 0x4000 /* RX Message Pending In Mailbox 30 */ +#define RMP31 0x8000 /* RX Message Pending In Mailbox 31 */ + +/* CAN_RML1 Masks */ +#define RML0 0x0001 /* RX Message Lost In Mailbox 0 */ +#define RML1 0x0002 /* RX Message Lost In Mailbox 1 */ +#define RML2 0x0004 /* RX Message Lost In Mailbox 2 */ +#define RML3 0x0008 /* 
RX Message Lost In Mailbox 3 */ +#define RML4 0x0010 /* RX Message Lost In Mailbox 4 */ +#define RML5 0x0020 /* RX Message Lost In Mailbox 5 */ +#define RML6 0x0040 /* RX Message Lost In Mailbox 6 */ +#define RML7 0x0080 /* RX Message Lost In Mailbox 7 */ +#define RML8 0x0100 /* RX Message Lost In Mailbox 8 */ +#define RML9 0x0200 /* RX Message Lost In Mailbox 9 */ +#define RML10 0x0400 /* RX Message Lost In Mailbox 10 */ +#define RML11 0x0800 /* RX Message Lost In Mailbox 11 */ +#define RML12 0x1000 /* RX Message Lost In Mailbox 12 */ +#define RML13 0x2000 /* RX Message Lost In Mailbox 13 */ +#define RML14 0x4000 /* RX Message Lost In Mailbox 14 */ +#define RML15 0x8000 /* RX Message Lost In Mailbox 15 */ + +/* CAN_RML2 Masks */ +#define RML16 0x0001 /* RX Message Lost In Mailbox 16 */ +#define RML17 0x0002 /* RX Message Lost In Mailbox 17 */ +#define RML18 0x0004 /* RX Message Lost In Mailbox 18 */ +#define RML19 0x0008 /* RX Message Lost In Mailbox 19 */ +#define RML20 0x0010 /* RX Message Lost In Mailbox 20 */ +#define RML21 0x0020 /* RX Message Lost In Mailbox 21 */ +#define RML22 0x0040 /* RX Message Lost In Mailbox 22 */ +#define RML23 0x0080 /* RX Message Lost In Mailbox 23 */ +#define RML24 0x0100 /* RX Message Lost In Mailbox 24 */ +#define RML25 0x0200 /* RX Message Lost In Mailbox 25 */ +#define RML26 0x0400 /* RX Message Lost In Mailbox 26 */ +#define RML27 0x0800 /* RX Message Lost In Mailbox 27 */ +#define RML28 0x1000 /* RX Message Lost In Mailbox 28 */ +#define RML29 0x2000 /* RX Message Lost In Mailbox 29 */ +#define RML30 0x4000 /* RX Message Lost In Mailbox 30 */ +#define RML31 0x8000 /* RX Message Lost In Mailbox 31 */ + +/* CAN_OPSS1 Masks */ +#define OPSS0 0x0001 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 0 */ +#define OPSS1 0x0002 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 1 */ +#define OPSS2 0x0004 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 2 */ +#define OPSS3 0x0008 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 3 */ +#define OPSS4 0x0010 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 4 */ +#define OPSS5 0x0020 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 5 */ +#define OPSS6 0x0040 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 6 */ +#define OPSS7 0x0080 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 7 */ +#define OPSS8 0x0100 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 8 */ +#define OPSS9 0x0200 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 9 */ +#define OPSS10 0x0400 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 10 */ +#define OPSS11 0x0800 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 11 */ +#define OPSS12 0x1000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 12 */ +#define OPSS13 0x2000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 13 */ +#define OPSS14 0x4000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 14 */ +#define OPSS15 0x8000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 15 */ + +/* CAN_OPSS2 Masks */ +#define OPSS16 0x0001 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 16 */ +#define OPSS17 0x0002 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 17 */ +#define OPSS18 0x0004 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 18 */ +#define OPSS19 0x0008 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 19 
*/ +#define OPSS20 0x0010 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 20 */ +#define OPSS21 0x0020 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 21 */ +#define OPSS22 0x0040 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 22 */ +#define OPSS23 0x0080 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 23 */ +#define OPSS24 0x0100 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 24 */ +#define OPSS25 0x0200 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 25 */ +#define OPSS26 0x0400 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 26 */ +#define OPSS27 0x0800 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 27 */ +#define OPSS28 0x1000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 28 */ +#define OPSS29 0x2000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 29 */ +#define OPSS30 0x4000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 30 */ +#define OPSS31 0x8000 /* Enable RX Overwrite Protection or TX Single-Shot For Mailbox 31 */ + +/* CAN_TRR1 Masks */ +#define TRR0 0x0001 /* Deny But Don't Lock Access To Mailbox 0 */ +#define TRR1 0x0002 /* Deny But Don't Lock Access To Mailbox 1 */ +#define TRR2 0x0004 /* Deny But Don't Lock Access To Mailbox 2 */ +#define TRR3 0x0008 /* Deny But Don't Lock Access To Mailbox 3 */ +#define TRR4 0x0010 /* Deny But Don't Lock Access To Mailbox 4 */ +#define TRR5 0x0020 /* Deny But Don't Lock Access To Mailbox 5 */ +#define TRR6 0x0040 /* Deny But Don't Lock Access To Mailbox 6 */ +#define TRR7 0x0080 /* Deny But Don't Lock Access To Mailbox 7 */ +#define TRR8 0x0100 /* Deny But Don't Lock Access To Mailbox 8 */ +#define TRR9 0x0200 /* Deny But Don't Lock Access To Mailbox 9 */ +#define TRR10 0x0400 /* Deny But Don't Lock Access To Mailbox 10 */ +#define TRR11 0x0800 /* Deny But Don't Lock Access To Mailbox 11 */ +#define TRR12 0x1000 /* Deny But Don't Lock Access To Mailbox 12 */ +#define TRR13 0x2000 /* Deny But Don't Lock Access To Mailbox 13 */ +#define TRR14 0x4000 /* Deny But Don't Lock Access To Mailbox 14 */ +#define TRR15 0x8000 /* Deny But Don't Lock Access To Mailbox 15 */ + +/* CAN_TRR2 Masks */ +#define TRR16 0x0001 /* Deny But Don't Lock Access To Mailbox 16 */ +#define TRR17 0x0002 /* Deny But Don't Lock Access To Mailbox 17 */ +#define TRR18 0x0004 /* Deny But Don't Lock Access To Mailbox 18 */ +#define TRR19 0x0008 /* Deny But Don't Lock Access To Mailbox 19 */ +#define TRR20 0x0010 /* Deny But Don't Lock Access To Mailbox 20 */ +#define TRR21 0x0020 /* Deny But Don't Lock Access To Mailbox 21 */ +#define TRR22 0x0040 /* Deny But Don't Lock Access To Mailbox 22 */ +#define TRR23 0x0080 /* Deny But Don't Lock Access To Mailbox 23 */ +#define TRR24 0x0100 /* Deny But Don't Lock Access To Mailbox 24 */ +#define TRR25 0x0200 /* Deny But Don't Lock Access To Mailbox 25 */ +#define TRR26 0x0400 /* Deny But Don't Lock Access To Mailbox 26 */ +#define TRR27 0x0800 /* Deny But Don't Lock Access To Mailbox 27 */ +#define TRR28 0x1000 /* Deny But Don't Lock Access To Mailbox 28 */ +#define TRR29 0x2000 /* Deny But Don't Lock Access To Mailbox 29 */ +#define TRR30 0x4000 /* Deny But Don't Lock Access To Mailbox 30 */ +#define TRR31 0x8000 /* Deny But Don't Lock Access To Mailbox 31 */ + +/* CAN_TRS1 Masks */ +#define TRS0 0x0001 /* Remote Frame Request For Mailbox 0 */ +#define TRS1 0x0002 /* Remote Frame Request For Mailbox 1 */ +#define TRS2 0x0004 /* Remote Frame Request For 
Mailbox 2 */ +#define TRS3 0x0008 /* Remote Frame Request For Mailbox 3 */ +#define TRS4 0x0010 /* Remote Frame Request For Mailbox 4 */ +#define TRS5 0x0020 /* Remote Frame Request For Mailbox 5 */ +#define TRS6 0x0040 /* Remote Frame Request For Mailbox 6 */ +#define TRS7 0x0080 /* Remote Frame Request For Mailbox 7 */ +#define TRS8 0x0100 /* Remote Frame Request For Mailbox 8 */ +#define TRS9 0x0200 /* Remote Frame Request For Mailbox 9 */ +#define TRS10 0x0400 /* Remote Frame Request For Mailbox 10 */ +#define TRS11 0x0800 /* Remote Frame Request For Mailbox 11 */ +#define TRS12 0x1000 /* Remote Frame Request For Mailbox 12 */ +#define TRS13 0x2000 /* Remote Frame Request For Mailbox 13 */ +#define TRS14 0x4000 /* Remote Frame Request For Mailbox 14 */ +#define TRS15 0x8000 /* Remote Frame Request For Mailbox 15 */ + +/* CAN_TRS2 Masks */ +#define TRS16 0x0001 /* Remote Frame Request For Mailbox 16 */ +#define TRS17 0x0002 /* Remote Frame Request For Mailbox 17 */ +#define TRS18 0x0004 /* Remote Frame Request For Mailbox 18 */ +#define TRS19 0x0008 /* Remote Frame Request For Mailbox 19 */ +#define TRS20 0x0010 /* Remote Frame Request For Mailbox 20 */ +#define TRS21 0x0020 /* Remote Frame Request For Mailbox 21 */ +#define TRS22 0x0040 /* Remote Frame Request For Mailbox 22 */ +#define TRS23 0x0080 /* Remote Frame Request For Mailbox 23 */ +#define TRS24 0x0100 /* Remote Frame Request For Mailbox 24 */ +#define TRS25 0x0200 /* Remote Frame Request For Mailbox 25 */ +#define TRS26 0x0400 /* Remote Frame Request For Mailbox 26 */ +#define TRS27 0x0800 /* Remote Frame Request For Mailbox 27 */ +#define TRS28 0x1000 /* Remote Frame Request For Mailbox 28 */ +#define TRS29 0x2000 /* Remote Frame Request For Mailbox 29 */ +#define TRS30 0x4000 /* Remote Frame Request For Mailbox 30 */ +#define TRS31 0x8000 /* Remote Frame Request For Mailbox 31 */ + +/* CAN_AA1 Masks */ +#define AA0 0x0001 /* Aborted Message In Mailbox 0 */ +#define AA1 0x0002 /* Aborted Message In Mailbox 1 */ +#define AA2 0x0004 /* Aborted Message In Mailbox 2 */ +#define AA3 0x0008 /* Aborted Message In Mailbox 3 */ +#define AA4 0x0010 /* Aborted Message In Mailbox 4 */ +#define AA5 0x0020 /* Aborted Message In Mailbox 5 */ +#define AA6 0x0040 /* Aborted Message In Mailbox 6 */ +#define AA7 0x0080 /* Aborted Message In Mailbox 7 */ +#define AA8 0x0100 /* Aborted Message In Mailbox 8 */ +#define AA9 0x0200 /* Aborted Message In Mailbox 9 */ +#define AA10 0x0400 /* Aborted Message In Mailbox 10 */ +#define AA11 0x0800 /* Aborted Message In Mailbox 11 */ +#define AA12 0x1000 /* Aborted Message In Mailbox 12 */ +#define AA13 0x2000 /* Aborted Message In Mailbox 13 */ +#define AA14 0x4000 /* Aborted Message In Mailbox 14 */ +#define AA15 0x8000 /* Aborted Message In Mailbox 15 */ + +/* CAN_AA2 Masks */ +#define AA16 0x0001 /* Aborted Message In Mailbox 16 */ +#define AA17 0x0002 /* Aborted Message In Mailbox 17 */ +#define AA18 0x0004 /* Aborted Message In Mailbox 18 */ +#define AA19 0x0008 /* Aborted Message In Mailbox 19 */ +#define AA20 0x0010 /* Aborted Message In Mailbox 20 */ +#define AA21 0x0020 /* Aborted Message In Mailbox 21 */ +#define AA22 0x0040 /* Aborted Message In Mailbox 22 */ +#define AA23 0x0080 /* Aborted Message In Mailbox 23 */ +#define AA24 0x0100 /* Aborted Message In Mailbox 24 */ +#define AA25 0x0200 /* Aborted Message In Mailbox 25 */ +#define AA26 0x0400 /* Aborted Message In Mailbox 26 */ +#define AA27 0x0800 /* Aborted Message In Mailbox 27 */ +#define AA28 0x1000 /* Aborted Message In Mailbox 28 
*/ +#define AA29 0x2000 /* Aborted Message In Mailbox 29 */ +#define AA30 0x4000 /* Aborted Message In Mailbox 30 */ +#define AA31 0x8000 /* Aborted Message In Mailbox 31 */ + +/* CAN_TA1 Masks */ +#define TA0 0x0001 /* Transmit Successful From Mailbox 0 */ +#define TA1 0x0002 /* Transmit Successful From Mailbox 1 */ +#define TA2 0x0004 /* Transmit Successful From Mailbox 2 */ +#define TA3 0x0008 /* Transmit Successful From Mailbox 3 */ +#define TA4 0x0010 /* Transmit Successful From Mailbox 4 */ +#define TA5 0x0020 /* Transmit Successful From Mailbox 5 */ +#define TA6 0x0040 /* Transmit Successful From Mailbox 6 */ +#define TA7 0x0080 /* Transmit Successful From Mailbox 7 */ +#define TA8 0x0100 /* Transmit Successful From Mailbox 8 */ +#define TA9 0x0200 /* Transmit Successful From Mailbox 9 */ +#define TA10 0x0400 /* Transmit Successful From Mailbox 10 */ +#define TA11 0x0800 /* Transmit Successful From Mailbox 11 */ +#define TA12 0x1000 /* Transmit Successful From Mailbox 12 */ +#define TA13 0x2000 /* Transmit Successful From Mailbox 13 */ +#define TA14 0x4000 /* Transmit Successful From Mailbox 14 */ +#define TA15 0x8000 /* Transmit Successful From Mailbox 15 */ + +/* CAN_TA2 Masks */ +#define TA16 0x0001 /* Transmit Successful From Mailbox 16 */ +#define TA17 0x0002 /* Transmit Successful From Mailbox 17 */ +#define TA18 0x0004 /* Transmit Successful From Mailbox 18 */ +#define TA19 0x0008 /* Transmit Successful From Mailbox 19 */ +#define TA20 0x0010 /* Transmit Successful From Mailbox 20 */ +#define TA21 0x0020 /* Transmit Successful From Mailbox 21 */ +#define TA22 0x0040 /* Transmit Successful From Mailbox 22 */ +#define TA23 0x0080 /* Transmit Successful From Mailbox 23 */ +#define TA24 0x0100 /* Transmit Successful From Mailbox 24 */ +#define TA25 0x0200 /* Transmit Successful From Mailbox 25 */ +#define TA26 0x0400 /* Transmit Successful From Mailbox 26 */ +#define TA27 0x0800 /* Transmit Successful From Mailbox 27 */ +#define TA28 0x1000 /* Transmit Successful From Mailbox 28 */ +#define TA29 0x2000 /* Transmit Successful From Mailbox 29 */ +#define TA30 0x4000 /* Transmit Successful From Mailbox 30 */ +#define TA31 0x8000 /* Transmit Successful From Mailbox 31 */ + +/* CAN_MBTD Masks */ +#define TDPTR 0x001F /* Mailbox To Temporarily Disable */ +#define TDA 0x0040 /* Temporary Disable Acknowledge */ +#define TDR 0x0080 /* Temporary Disable Request */ + +/* CAN_RFH1 Masks */ +#define RFH0 0x0001 /* Enable Automatic Remote Frame Handling For Mailbox 0 */ +#define RFH1 0x0002 /* Enable Automatic Remote Frame Handling For Mailbox 1 */ +#define RFH2 0x0004 /* Enable Automatic Remote Frame Handling For Mailbox 2 */ +#define RFH3 0x0008 /* Enable Automatic Remote Frame Handling For Mailbox 3 */ +#define RFH4 0x0010 /* Enable Automatic Remote Frame Handling For Mailbox 4 */ +#define RFH5 0x0020 /* Enable Automatic Remote Frame Handling For Mailbox 5 */ +#define RFH6 0x0040 /* Enable Automatic Remote Frame Handling For Mailbox 6 */ +#define RFH7 0x0080 /* Enable Automatic Remote Frame Handling For Mailbox 7 */ +#define RFH8 0x0100 /* Enable Automatic Remote Frame Handling For Mailbox 8 */ +#define RFH9 0x0200 /* Enable Automatic Remote Frame Handling For Mailbox 9 */ +#define RFH10 0x0400 /* Enable Automatic Remote Frame Handling For Mailbox 10 */ +#define RFH11 0x0800 /* Enable Automatic Remote Frame Handling For Mailbox 11 */ +#define RFH12 0x1000 /* Enable Automatic Remote Frame Handling For Mailbox 12 */ +#define RFH13 0x2000 /* Enable Automatic Remote Frame Handling For Mailbox 
13 */ +#define RFH14 0x4000 /* Enable Automatic Remote Frame Handling For Mailbox 14 */ +#define RFH15 0x8000 /* Enable Automatic Remote Frame Handling For Mailbox 15 */ + +/* CAN_RFH2 Masks */ +#define RFH16 0x0001 /* Enable Automatic Remote Frame Handling For Mailbox 16 */ +#define RFH17 0x0002 /* Enable Automatic Remote Frame Handling For Mailbox 17 */ +#define RFH18 0x0004 /* Enable Automatic Remote Frame Handling For Mailbox 18 */ +#define RFH19 0x0008 /* Enable Automatic Remote Frame Handling For Mailbox 19 */ +#define RFH20 0x0010 /* Enable Automatic Remote Frame Handling For Mailbox 20 */ +#define RFH21 0x0020 /* Enable Automatic Remote Frame Handling For Mailbox 21 */ +#define RFH22 0x0040 /* Enable Automatic Remote Frame Handling For Mailbox 22 */ +#define RFH23 0x0080 /* Enable Automatic Remote Frame Handling For Mailbox 23 */ +#define RFH24 0x0100 /* Enable Automatic Remote Frame Handling For Mailbox 24 */ +#define RFH25 0x0200 /* Enable Automatic Remote Frame Handling For Mailbox 25 */ +#define RFH26 0x0400 /* Enable Automatic Remote Frame Handling For Mailbox 26 */ +#define RFH27 0x0800 /* Enable Automatic Remote Frame Handling For Mailbox 27 */ +#define RFH28 0x1000 /* Enable Automatic Remote Frame Handling For Mailbox 28 */ +#define RFH29 0x2000 /* Enable Automatic Remote Frame Handling For Mailbox 29 */ +#define RFH30 0x4000 /* Enable Automatic Remote Frame Handling For Mailbox 30 */ +#define RFH31 0x8000 /* Enable Automatic Remote Frame Handling For Mailbox 31 */ + +/* CAN_MBTIF1 Masks */ +#define MBTIF0 0x0001 /* TX Interrupt Active In Mailbox 0 */ +#define MBTIF1 0x0002 /* TX Interrupt Active In Mailbox 1 */ +#define MBTIF2 0x0004 /* TX Interrupt Active In Mailbox 2 */ +#define MBTIF3 0x0008 /* TX Interrupt Active In Mailbox 3 */ +#define MBTIF4 0x0010 /* TX Interrupt Active In Mailbox 4 */ +#define MBTIF5 0x0020 /* TX Interrupt Active In Mailbox 5 */ +#define MBTIF6 0x0040 /* TX Interrupt Active In Mailbox 6 */ +#define MBTIF7 0x0080 /* TX Interrupt Active In Mailbox 7 */ +#define MBTIF8 0x0100 /* TX Interrupt Active In Mailbox 8 */ +#define MBTIF9 0x0200 /* TX Interrupt Active In Mailbox 9 */ +#define MBTIF10 0x0400 /* TX Interrupt Active In Mailbox 10 */ +#define MBTIF11 0x0800 /* TX Interrupt Active In Mailbox 11 */ +#define MBTIF12 0x1000 /* TX Interrupt Active In Mailbox 12 */ +#define MBTIF13 0x2000 /* TX Interrupt Active In Mailbox 13 */ +#define MBTIF14 0x4000 /* TX Interrupt Active In Mailbox 14 */ +#define MBTIF15 0x8000 /* TX Interrupt Active In Mailbox 15 */ + +/* CAN_MBTIF2 Masks */ +#define MBTIF16 0x0001 /* TX Interrupt Active In Mailbox 16 */ +#define MBTIF17 0x0002 /* TX Interrupt Active In Mailbox 17 */ +#define MBTIF18 0x0004 /* TX Interrupt Active In Mailbox 18 */ +#define MBTIF19 0x0008 /* TX Interrupt Active In Mailbox 19 */ +#define MBTIF20 0x0010 /* TX Interrupt Active In Mailbox 20 */ +#define MBTIF21 0x0020 /* TX Interrupt Active In Mailbox 21 */ +#define MBTIF22 0x0040 /* TX Interrupt Active In Mailbox 22 */ +#define MBTIF23 0x0080 /* TX Interrupt Active In Mailbox 23 */ +#define MBTIF24 0x0100 /* TX Interrupt Active In Mailbox 24 */ +#define MBTIF25 0x0200 /* TX Interrupt Active In Mailbox 25 */ +#define MBTIF26 0x0400 /* TX Interrupt Active In Mailbox 26 */ +#define MBTIF27 0x0800 /* TX Interrupt Active In Mailbox 27 */ +#define MBTIF28 0x1000 /* TX Interrupt Active In Mailbox 28 */ +#define MBTIF29 0x2000 /* TX Interrupt Active In Mailbox 29 */ +#define MBTIF30 0x4000 /* TX Interrupt Active In Mailbox 30 */ +#define MBTIF31 0x8000 /* TX 
Interrupt Active In Mailbox 31 */ + +/* CAN_MBRIF1 Masks */ +#define MBRIF0 0x0001 /* RX Interrupt Active In Mailbox 0 */ +#define MBRIF1 0x0002 /* RX Interrupt Active In Mailbox 1 */ +#define MBRIF2 0x0004 /* RX Interrupt Active In Mailbox 2 */ +#define MBRIF3 0x0008 /* RX Interrupt Active In Mailbox 3 */ +#define MBRIF4 0x0010 /* RX Interrupt Active In Mailbox 4 */ +#define MBRIF5 0x0020 /* RX Interrupt Active In Mailbox 5 */ +#define MBRIF6 0x0040 /* RX Interrupt Active In Mailbox 6 */ +#define MBRIF7 0x0080 /* RX Interrupt Active In Mailbox 7 */ +#define MBRIF8 0x0100 /* RX Interrupt Active In Mailbox 8 */ +#define MBRIF9 0x0200 /* RX Interrupt Active In Mailbox 9 */ +#define MBRIF10 0x0400 /* RX Interrupt Active In Mailbox 10 */ +#define MBRIF11 0x0800 /* RX Interrupt Active In Mailbox 11 */ +#define MBRIF12 0x1000 /* RX Interrupt Active In Mailbox 12 */ +#define MBRIF13 0x2000 /* RX Interrupt Active In Mailbox 13 */ +#define MBRIF14 0x4000 /* RX Interrupt Active In Mailbox 14 */ +#define MBRIF15 0x8000 /* RX Interrupt Active In Mailbox 15 */ + +/* CAN_MBRIF2 Masks */ +#define MBRIF16 0x0001 /* RX Interrupt Active In Mailbox 16 */ +#define MBRIF17 0x0002 /* RX Interrupt Active In Mailbox 17 */ +#define MBRIF18 0x0004 /* RX Interrupt Active In Mailbox 18 */ +#define MBRIF19 0x0008 /* RX Interrupt Active In Mailbox 19 */ +#define MBRIF20 0x0010 /* RX Interrupt Active In Mailbox 20 */ +#define MBRIF21 0x0020 /* RX Interrupt Active In Mailbox 21 */ +#define MBRIF22 0x0040 /* RX Interrupt Active In Mailbox 22 */ +#define MBRIF23 0x0080 /* RX Interrupt Active In Mailbox 23 */ +#define MBRIF24 0x0100 /* RX Interrupt Active In Mailbox 24 */ +#define MBRIF25 0x0200 /* RX Interrupt Active In Mailbox 25 */ +#define MBRIF26 0x0400 /* RX Interrupt Active In Mailbox 26 */ +#define MBRIF27 0x0800 /* RX Interrupt Active In Mailbox 27 */ +#define MBRIF28 0x1000 /* RX Interrupt Active In Mailbox 28 */ +#define MBRIF29 0x2000 /* RX Interrupt Active In Mailbox 29 */ +#define MBRIF30 0x4000 /* RX Interrupt Active In Mailbox 30 */ +#define MBRIF31 0x8000 /* RX Interrupt Active In Mailbox 31 */ + +/* CAN_MBIM1 Masks */ +#define MBIM0 0x0001 /* Enable Interrupt For Mailbox 0 */ +#define MBIM1 0x0002 /* Enable Interrupt For Mailbox 1 */ +#define MBIM2 0x0004 /* Enable Interrupt For Mailbox 2 */ +#define MBIM3 0x0008 /* Enable Interrupt For Mailbox 3 */ +#define MBIM4 0x0010 /* Enable Interrupt For Mailbox 4 */ +#define MBIM5 0x0020 /* Enable Interrupt For Mailbox 5 */ +#define MBIM6 0x0040 /* Enable Interrupt For Mailbox 6 */ +#define MBIM7 0x0080 /* Enable Interrupt For Mailbox 7 */ +#define MBIM8 0x0100 /* Enable Interrupt For Mailbox 8 */ +#define MBIM9 0x0200 /* Enable Interrupt For Mailbox 9 */ +#define MBIM10 0x0400 /* Enable Interrupt For Mailbox 10 */ +#define MBIM11 0x0800 /* Enable Interrupt For Mailbox 11 */ +#define MBIM12 0x1000 /* Enable Interrupt For Mailbox 12 */ +#define MBIM13 0x2000 /* Enable Interrupt For Mailbox 13 */ +#define MBIM14 0x4000 /* Enable Interrupt For Mailbox 14 */ +#define MBIM15 0x8000 /* Enable Interrupt For Mailbox 15 */ + +/* CAN_MBIM2 Masks */ +#define MBIM16 0x0001 /* Enable Interrupt For Mailbox 16 */ +#define MBIM17 0x0002 /* Enable Interrupt For Mailbox 17 */ +#define MBIM18 0x0004 /* Enable Interrupt For Mailbox 18 */ +#define MBIM19 0x0008 /* Enable Interrupt For Mailbox 19 */ +#define MBIM20 0x0010 /* Enable Interrupt For Mailbox 20 */ +#define MBIM21 0x0020 /* Enable Interrupt For Mailbox 21 */ +#define MBIM22 0x0040 /* Enable Interrupt For Mailbox 22 */ 
+#define MBIM23 0x0080 /* Enable Interrupt For Mailbox 23 */ +#define MBIM24 0x0100 /* Enable Interrupt For Mailbox 24 */ +#define MBIM25 0x0200 /* Enable Interrupt For Mailbox 25 */ +#define MBIM26 0x0400 /* Enable Interrupt For Mailbox 26 */ +#define MBIM27 0x0800 /* Enable Interrupt For Mailbox 27 */ +#define MBIM28 0x1000 /* Enable Interrupt For Mailbox 28 */ +#define MBIM29 0x2000 /* Enable Interrupt For Mailbox 29 */ +#define MBIM30 0x4000 /* Enable Interrupt For Mailbox 30 */ +#define MBIM31 0x8000 /* Enable Interrupt For Mailbox 31 */ + +/* CAN_GIM Masks */ +#define EWTIM 0x0001 /* Enable TX Error Count Interrupt */ +#define EWRIM 0x0002 /* Enable RX Error Count Interrupt */ +#define EPIM 0x0004 /* Enable Error-Passive Mode Interrupt */ +#define BOIM 0x0008 /* Enable Bus Off Interrupt */ +#define WUIM 0x0010 /* Enable Wake-Up Interrupt */ +#define UIAIM 0x0020 /* Enable Access To Unimplemented Address Interrupt */ +#define AAIM 0x0040 /* Enable Abort Acknowledge Interrupt */ +#define RMLIM 0x0080 /* Enable RX Message Lost Interrupt */ +#define UCEIM 0x0100 /* Enable Universal Counter Overflow Interrupt */ +#define EXTIM 0x0200 /* Enable External Trigger Output Interrupt */ +#define ADIM 0x0400 /* Enable Access Denied Interrupt */ + +/* CAN_GIS Masks */ +#define EWTIS 0x0001 /* TX Error Count IRQ Status */ +#define EWRIS 0x0002 /* RX Error Count IRQ Status */ +#define EPIS 0x0004 /* Error-Passive Mode IRQ Status */ +#define BOIS 0x0008 /* Bus Off IRQ Status */ +#define WUIS 0x0010 /* Wake-Up IRQ Status */ +#define UIAIS 0x0020 /* Access To Unimplemented Address IRQ Status */ +#define AAIS 0x0040 /* Abort Acknowledge IRQ Status */ +#define RMLIS 0x0080 /* RX Message Lost IRQ Status */ +#define UCEIS 0x0100 /* Universal Counter Overflow IRQ Status */ +#define EXTIS 0x0200 /* External Trigger Output IRQ Status */ +#define ADIS 0x0400 /* Access Denied IRQ Status */ + +/* CAN_GIF Masks */ +#define EWTIF 0x0001 /* TX Error Count IRQ Flag */ +#define EWRIF 0x0002 /* RX Error Count IRQ Flag */ +#define EPIF 0x0004 /* Error-Passive Mode IRQ Flag */ +#define BOIF 0x0008 /* Bus Off IRQ Flag */ +#define WUIF 0x0010 /* Wake-Up IRQ Flag */ +#define UIAIF 0x0020 /* Access To Unimplemented Address IRQ Flag */ +#define AAIF 0x0040 /* Abort Acknowledge IRQ Flag */ +#define RMLIF 0x0080 /* RX Message Lost IRQ Flag */ +#define UCEIF 0x0100 /* Universal Counter Overflow IRQ Flag */ +#define EXTIF 0x0200 /* External Trigger Output IRQ Flag */ +#define ADIF 0x0400 /* Access Denied IRQ Flag */ + +/* CAN_UCCNF Masks */ +#define UCCNF 0x000F /* Universal Counter Mode */ +#define UC_STAMP 0x0001 /* Timestamp Mode */ +#define UC_WDOG 0x0002 /* Watchdog Mode */ +#define UC_AUTOTX 0x0003 /* Auto-Transmit Mode */ +#define UC_ERROR 0x0006 /* CAN Error Frame Count */ +#define UC_OVER 0x0007 /* CAN Overload Frame Count */ +#define UC_LOST 0x0008 /* Arbitration Lost During TX Count */ +#define UC_AA 0x0009 /* TX Abort Count */ +#define UC_TA 0x000A /* TX Successful Count */ +#define UC_REJECT 0x000B /* RX Message Rejected Count */ +#define UC_RML 0x000C /* RX Message Lost Count */ +#define UC_RX 0x000D /* Total Successful RX Messages Count */ +#define UC_RMP 0x000E /* Successful RX W/Matching ID Count */ +#define UC_ALL 0x000F /* Correct Message On CAN Bus Line Count */ +#define UCRC 0x0020 /* Universal Counter Reload/Clear */ +#define UCCT 0x0040 /* Universal Counter CAN Trigger */ +#define UCE 0x0080 /* Universal Counter Enable */ + +/* CAN_ESR Masks */ +#define ACKE 0x0004 /* Acknowledge Error */ +#define SER 
0x0008 /* Stuff Error */ +#define CRCE 0x0010 /* CRC Error */ +#define SA0 0x0020 /* Stuck At Dominant Error */ +#define BEF 0x0040 /* Bit Error Flag */ +#define FER 0x0080 /* Form Error Flag */ + +/* CAN_EWR Masks */ +#define EWLREC 0x00FF /* RX Error Count Limit (For EWRIS) */ +#define EWLTEC 0xFF00 /* TX Error Count Limit (For EWTIS) */ + +#endif -- cgit v1.2.3 From 0c270807db7c6478d60ab2bc23511fd5868a7a80 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Tue, 15 Dec 2009 20:57:43 +0000 Subject: Blackfin: drop duplicate HOTPLUG Kconfig option The option already exists for everyone in init/Kconfig. Signed-off-by: Mike Frysinger --- arch/blackfin/Kconfig | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 1f075be7717f..3bd8e833c89e 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -1120,24 +1120,6 @@ config PCI source "drivers/pci/Kconfig" -config HOTPLUG - bool "Support for hot-pluggable device" - help - Say Y here if you want to plug devices into your computer while - the system is running, and be able to use them quickly. In many - cases, the devices can likewise be unplugged at any time too. - - One well known example of this is PCMCIA- or PC-cards, credit-card - size devices such as network cards, modems or hard drives which are - plugged into slots found on all modern laptop computers. Another - example, used on modern desktops as well as laptops, is USB. - - Enable HOTPLUG and build a modular kernel. Get agent software - (from ) and install it. - Then your kernel will automatically call out to a user mode "policy - agent" (/sbin/hotplug) to load modules and set up software needed - to use devices as you hotplug them. - source "drivers/pcmcia/Kconfig" source "drivers/pci/hotplug/Kconfig" -- cgit v1.2.3 From a3a6a590190ca77aedf9de871d941c1139a5a24c Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Wed, 16 Dec 2009 07:52:52 +0000 Subject: Blackfin: dma-mapping: fix thinko in constant optimization Make sure the non-constant version of the dma_sync functions actually complete instead of recursively calling itself forever. Signed-off-by: Sonic Zhang Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/dma-mapping.h | 15 +++++++++------ arch/blackfin/kernel/dma-mapping.c | 2 +- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h index f9172ff30e5c..e63f6d9fdab8 100644 --- a/arch/blackfin/include/asm/dma-mapping.h +++ b/arch/blackfin/include/asm/dma-mapping.h @@ -44,13 +44,8 @@ dma_mapping_error(struct device *dev, dma_addr_t dma_addr) extern void __dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir); static inline void -_dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir) +__dma_sync_inline(dma_addr_t addr, size_t size, enum dma_data_direction dir) { - if (!__builtin_constant_p(dir)) { - __dma_sync(addr, size, dir); - return; - } - switch (dir) { case DMA_NONE: BUG(); @@ -64,6 +59,14 @@ _dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir) break; } } +static inline void +_dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + if (__builtin_constant_p(dir)) + __dma_sync_inline(addr, size, dir); + else + __dma_sync(addr, size, dir); +} /* * Map a single buffer of the indicated size for DMA in streaming mode. 
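The recursion being fixed is easiest to see from the call chain (a sketch for illustration, not part of the patch). Before this change the out-of-line helper in dma-mapping.c was simply

	void __dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
	{
		_dma_sync(addr, size, dir);
	}

while the inline _dma_sync() above forwarded any dir that is not a compile-time constant straight back to __dma_sync(), and inside the non-inlined __dma_sync() the dir parameter never counts as constant, so the two routines called each other indefinitely. Moving the switch into __dma_sync_inline() gives the out-of-line version a terminating body, as the dma-mapping.c hunk below shows.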
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c index e937f323d82c..04ddcfeb7981 100644 --- a/arch/blackfin/kernel/dma-mapping.c +++ b/arch/blackfin/kernel/dma-mapping.c @@ -116,7 +116,7 @@ EXPORT_SYMBOL(dma_free_coherent); void __dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir) { - _dma_sync(addr, size, dir); + __dma_sync_inline(addr, size, dir); } EXPORT_SYMBOL(__dma_sync); -- cgit v1.2.3 From 15435a2a55a678b21b9bb75a7513b3c9ff69a68e Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Wed, 16 Dec 2009 08:39:58 +0000 Subject: Blackfin: pull in asm/bfin_can.h for interrupt masks Signed-off-by: Michael Hennerich Signed-off-by: Mike Frysinger --- arch/blackfin/mach-common/ints-priority.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index 1873b2c1fede..5202a6076695 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c @@ -28,6 +28,7 @@ #include #include #include +#include #define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1)) -- cgit v1.2.3 From edd07992504e631b50d18d130c62addbcb16269e Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Wed, 16 Dec 2009 08:45:17 +0000 Subject: Blackfin: gpio: use shorter name The gpio label size is 16 char, but the current code uses a longer name resulting in chopped display. So use a shorter name. Reported-by: Peter Meerwald Signed-off-by: Michael Hennerich Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/bfin_gpio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c index a174596cc009..0dd9cf913503 100644 --- a/arch/blackfin/kernel/bfin_gpio.c +++ b/arch/blackfin/kernel/bfin_gpio.c @@ -1320,7 +1320,7 @@ void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio) } static struct gpio_chip bfin_chip = { - .label = "Blackfin-GPIOlib", + .label = "BFIN-GPIO", .direction_input = bfin_gpiolib_direction_input, .get = bfin_gpiolib_get_value, .direction_output = bfin_gpiolib_direction_output, -- cgit v1.2.3 From 244d34230b7447fba95221dbf39b39e94257939c Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Fri, 18 Dec 2009 09:29:39 +0000 Subject: Blackfin: update AD7879 resources to match the new gpiolib driver support Signed-off-by: Michael Hennerich Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf527/boards/ezbrd.c | 4 ++-- arch/blackfin/mach-bf527/boards/ezkit.c | 3 +-- arch/blackfin/mach-bf537/boards/stamp.c | 4 ++-- arch/blackfin/mach-bf538/boards/ezkit.c | 4 ++-- 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c index faede2b964ba..55069af4f67d 100644 --- a/arch/blackfin/mach-bf527/boards/ezbrd.c +++ b/arch/blackfin/mach-bf527/boards/ezbrd.c @@ -274,8 +274,8 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = { .median = 2, /* do 8 measurements */ .averaging = 1, /* take the average of 4 middle samples */ .pen_down_acc_interval = 255, /* 9.4 ms */ - .gpio_output = 1, /* configure AUX/VBAT/GPIO as GPIO output */ - .gpio_default = 1, /* During initialization set GPIO = HIGH */ + .gpio_export = 1, /* Export GPIO to gpiolib */ + .gpio_base = -1, /* Dynamic allocation */ }; #endif diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index fdfe8cae49bc..0cc3581e6461 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ 
b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -521,8 +521,7 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = { .median = 2, /* do 8 measurements */ .averaging = 1, /* take the average of 4 middle samples */ .pen_down_acc_interval = 255, /* 9.4 ms */ - .gpio_output = 1, /* configure AUX/VBAT/GPIO as GPIO output */ - .gpio_default = 1, /* During initialization set GPIO = HIGH */ + .gpio_export = 0, /* Export GPIO to gpiolib */ }; #endif diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 538bff83e607..03e36bd6fd05 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -705,8 +705,8 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = { .median = 2, /* do 8 measurements */ .averaging = 1, /* take the average of 4 middle samples */ .pen_down_acc_interval = 255, /* 9.4 ms */ - .gpio_output = 1, /* configure AUX/VBAT/GPIO as GPIO output */ - .gpio_default = 1, /* During initialization set GPIO = HIGH */ + .gpio_export = 1, /* Export GPIO to gpiolib */ + .gpio_base = -1, /* Dynamic allocation */ }; #endif diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c index d2d06f046ad0..1a1f65855b03 100644 --- a/arch/blackfin/mach-bf538/boards/ezkit.c +++ b/arch/blackfin/mach-bf538/boards/ezkit.c @@ -518,8 +518,8 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = { .median = 2, /* do 8 measurements */ .averaging = 1, /* take the average of 4 middle samples */ .pen_down_acc_interval = 255, /* 9.4 ms */ - .gpio_output = 1, /* configure AUX/VBAT/GPIO as GPIO output */ - .gpio_default = 1, /* During initialization set GPIO = HIGH */ + .gpio_export = 1, /* Export GPIO to gpiolib */ + .gpio_base = -1, /* Dynamic allocation */ }; #endif -- cgit v1.2.3 From 1dafdc513a20410e328b22b6d25d4ac78557516a Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Fri, 18 Dec 2009 12:30:04 +0000 Subject: Blackfin: BF51x: fix L1 Instruction SRAM size The BF51x's Instruction SRAM is 32kB, not 48kB. Signed-off-by: Michael Hennerich Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf518/include/mach/mem_map.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/blackfin/mach-bf518/include/mach/mem_map.h b/arch/blackfin/mach-bf518/include/mach/mem_map.h index 3c6777cb3532..073b5d73d391 100644 --- a/arch/blackfin/mach-bf518/include/mach/mem_map.h +++ b/arch/blackfin/mach-bf518/include/mach/mem_map.h @@ -41,7 +41,7 @@ #define L1_DATA_A_START 0xFF800000 #define L1_DATA_B_START 0xFF900000 -#define L1_CODE_LENGTH 0xC000 +#define L1_CODE_LENGTH 0x8000 #ifdef CONFIG_BFIN_DCACHE -- cgit v1.2.3 From 3b781de6dd96d64c133a7ed774f385d35129575b Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Mon, 21 Dec 2009 15:04:03 +0000 Subject: Blackfin: drop unused SL811 platform resources from bf527/pnav boards These platforms don't hook up to this USB controller, so no point in declaring resources for it. 
Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf527/boards/cm_bf527.c | 49 ----------------------------- arch/blackfin/mach-bf527/boards/ezkit.c | 49 ----------------------------- arch/blackfin/mach-bf537/boards/pnav10.c | 50 ------------------------------ 3 files changed, 148 deletions(-) diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c index eaa0131f9605..d6efdfa7d4e0 100644 --- a/arch/blackfin/mach-bf527/boards/cm_bf527.c +++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -270,50 +269,6 @@ static struct platform_device dm9000_device = { }; #endif -#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE) -static struct resource sl811_hcd_resources[] = { - { - .start = 0x20340000, - .end = 0x20340000, - .flags = IORESOURCE_MEM, - }, { - .start = 0x20340004, - .end = 0x20340004, - .flags = IORESOURCE_MEM, - }, { - .start = CONFIG_USB_SL811_BFIN_IRQ, - .end = CONFIG_USB_SL811_BFIN_IRQ, - .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, - }, -}; - -#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS) -void sl811_port_power(struct device *dev, int is_on) -{ - gpio_request(CONFIG_USB_SL811_BFIN_GPIO_VBUS, "usb:SL811_VBUS"); - gpio_direction_output(CONFIG_USB_SL811_BFIN_GPIO_VBUS, is_on); -} -#endif - -static struct sl811_platform_data sl811_priv = { - .potpg = 10, - .power = 250, /* == 500mA */ -#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS) - .port_power = &sl811_port_power, -#endif -}; - -static struct platform_device sl811_hcd_device = { - .name = "sl811-hcd", - .id = 0, - .dev = { - .platform_data = &sl811_priv, - }, - .num_resources = ARRAY_SIZE(sl811_hcd_resources), - .resource = sl811_hcd_resources, -}; -#endif - #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) static struct platform_device bfin_mii_bus = { .name = "bfin_mii_bus", @@ -929,10 +884,6 @@ static struct platform_device *cmbf527_devices[] __initdata = { &rtc_device, #endif -#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE) - &sl811_hcd_device, -#endif - #if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) &bfin_isp1760_device, #endif diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index 0cc3581e6461..a7beed517c2e 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -359,50 +358,6 @@ static struct platform_device dm9000_device = { }; #endif -#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE) -static struct resource sl811_hcd_resources[] = { - { - .start = 0x20340000, - .end = 0x20340000, - .flags = IORESOURCE_MEM, - }, { - .start = 0x20340004, - .end = 0x20340004, - .flags = IORESOURCE_MEM, - }, { - .start = CONFIG_USB_SL811_BFIN_IRQ, - .end = CONFIG_USB_SL811_BFIN_IRQ, - .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, - }, -}; - -#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS) -void sl811_port_power(struct device *dev, int is_on) -{ - gpio_request(CONFIG_USB_SL811_BFIN_GPIO_VBUS, "usb:SL811_VBUS"); - gpio_direction_output(CONFIG_USB_SL811_BFIN_GPIO_VBUS, is_on); -} -#endif - -static struct sl811_platform_data sl811_priv = { - .potpg = 10, - .power = 250, /* == 500mA */ -#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS) - .port_power = &sl811_port_power, -#endif -}; - -static struct platform_device sl811_hcd_device = { - 
.name = "sl811-hcd", - .id = 0, - .dev = { - .platform_data = &sl811_priv, - }, - .num_resources = ARRAY_SIZE(sl811_hcd_resources), - .resource = sl811_hcd_resources, -}; -#endif - #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) static struct platform_device bfin_mii_bus = { .name = "bfin_mii_bus", @@ -1113,10 +1068,6 @@ static struct platform_device *stamp_devices[] __initdata = { &rtc_device, #endif -#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE) - &sl811_hcd_device, -#endif - #if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) &bfin_isp1760_device, #endif diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c index f08533d4aee8..a0f64ff0fb55 100644 --- a/arch/blackfin/mach-bf537/boards/pnav10.c +++ b/arch/blackfin/mach-bf537/boards/pnav10.c @@ -17,7 +17,6 @@ #include #include #include -#include #include @@ -99,51 +98,6 @@ static struct platform_device smc91x_device = { }; #endif -#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE) -static struct resource sl811_hcd_resources[] = { - { - .start = 0x20340000, - .end = 0x20340000, - .flags = IORESOURCE_MEM, - }, { - .start = 0x20340004, - .end = 0x20340004, - .flags = IORESOURCE_MEM, - }, { - .start = CONFIG_USB_SL811_BFIN_IRQ, - .end = CONFIG_USB_SL811_BFIN_IRQ, - .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, - }, -}; - -#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS) -void sl811_port_power(struct device *dev, int is_on) -{ - gpio_request(CONFIG_USB_SL811_BFIN_GPIO_VBUS, "usb:SL811_VBUS"); - gpio_direction_output(CONFIG_USB_SL811_BFIN_GPIO_VBUS, is_on); - -} -#endif - -static struct sl811_platform_data sl811_priv = { - .potpg = 10, - .power = 250, /* == 500mA */ -#if defined(CONFIG_USB_SL811_BFIN_USE_VBUS) - .port_power = &sl811_port_power, -#endif -}; - -static struct platform_device sl811_hcd_device = { - .name = "sl811-hcd", - .id = 0, - .dev = { - .platform_data = &sl811_priv, - }, - .num_resources = ARRAY_SIZE(sl811_hcd_resources), - .resource = sl811_hcd_resources, -}; -#endif - #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) static struct platform_device bfin_mii_bus = { .name = "bfin_mii_bus", @@ -514,10 +468,6 @@ static struct platform_device *stamp_devices[] __initdata = { &rtc_device, #endif -#if defined(CONFIG_USB_SL811_HCD) || defined(CONFIG_USB_SL811_HCD_MODULE) - &sl811_hcd_device, -#endif - #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) &smc91x_device, #endif -- cgit v1.2.3 From 01218654cd97a1e970034b1e080ec5459172c3f6 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Mon, 21 Dec 2009 15:07:43 +0000 Subject: Blackfin: bf537-stamp: change CONFIG_USB_SL811_BFIN_IRQ to IRQ_PF4 The Kconfig option was never mainlined, so replace the define with the actual pin that it is hooked up to by default. 
Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 03e36bd6fd05..34022367f518 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -208,8 +208,8 @@ static struct resource sl811_hcd_resources[] = { .end = 0x20340004, .flags = IORESOURCE_MEM, }, { - .start = CONFIG_USB_SL811_BFIN_IRQ, - .end = CONFIG_USB_SL811_BFIN_IRQ, + .start = IRQ_PF4, + .end = IRQ_PF4, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; -- cgit v1.2.3 From 7a9cc48907e05e61033b953a3860e55703625950 Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Tue, 22 Dec 2009 04:47:04 +0000 Subject: Blackfin: bf537-stamp: unify duplicated ADP5588 headers Signed-off-by: Sonic Zhang Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 34022367f518..27b9c9ce8d06 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -20,10 +20,11 @@ #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) #include #endif +#include +#include #include #include #include -#include #include #include #include @@ -1335,7 +1336,6 @@ static struct platform_device i2c_bfin_twi_device = { #endif #if defined(CONFIG_KEYBOARD_ADP5588) || defined(CONFIG_KEYBOARD_ADP5588_MODULE) -#include static const unsigned short adp5588_keymap[ADP5588_KEYMAPSIZE] = { [0] = KEY_GRAVE, [1] = KEY_1, @@ -1532,7 +1532,6 @@ static struct adp5520_platform_data adp5520_pdev_data = { #endif #if defined(CONFIG_GPIO_ADP5588) || defined(CONFIG_GPIO_ADP5588_MODULE) -#include static struct adp5588_gpio_platform_data adp5588_gpio_data = { .gpio_start = 50, .pullup_dis_mask = 0, -- cgit v1.2.3 From 252077cf7d44f1f261a5fdaedab88057f8379859 Mon Sep 17 00:00:00 2001 From: Valentin Yakovenkov Date: Tue, 22 Dec 2009 09:02:10 +0000 Subject: Blackfin: bf561-acvilon: save the smsc911x mac address Signed-off-by: Valentin Yakovenkov Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf561/boards/acvilon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c index 07e8dc8770da..5163e2c383c5 100644 --- a/arch/blackfin/mach-bf561/boards/acvilon.c +++ b/arch/blackfin/mach-bf561/boards/acvilon.c @@ -176,7 +176,7 @@ static struct resource smsc911x_resources[] = { }; static struct smsc911x_platform_config smsc911x_config = { - .flags = SMSC911X_USE_32BIT, + .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS, .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, .phy_interface = PHY_INTERFACE_MODE_MII, -- cgit v1.2.3 From 0f7b468b6eace87ecdc59b3ec8476d50b0561ac2 Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Tue, 22 Dec 2009 11:32:06 +0000 Subject: Blackfin: add optimized version of Hamming Weight functions Signed-off-by: Michael Hennerich Signed-off-by: Mike Frysinger --- arch/blackfin/Kconfig | 3 -- arch/blackfin/include/asm/bitops.h | 74 ++++++++++++++++++++++++++++---------- 2 files changed, 55 insertions(+), 22 deletions(-) diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 3bd8e833c89e..b483639c80b3 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -45,9 
+45,6 @@ config ZONE_DMA config GENERIC_FIND_NEXT_BIT def_bool y -config GENERIC_HWEIGHT - def_bool y - config GENERIC_HARDIRQS def_bool y diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h index a2ff3fb3568d..605ba8e9b2e4 100644 --- a/arch/blackfin/include/asm/bitops.h +++ b/arch/blackfin/include/asm/bitops.h @@ -7,22 +7,41 @@ #ifndef _BLACKFIN_BITOPS_H #define _BLACKFIN_BITOPS_H -#ifndef CONFIG_SMP -# include -#else +#include + +#include +#include +#include +#include +#include +#include #ifndef _LINUX_BITOPS_H #error only can be included directly #endif -#include -#include /* swab32 */ - -#include -#include #include -#include +#include +#include +#include +#include +#include + +#ifndef CONFIG_SMP +#include + +/* + * clear_bit may not imply a memory barrier + */ +#ifndef smp_mb__before_clear_bit +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() +#endif +#include +#include +#else +#include /* swab32 */ #include asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr); @@ -89,19 +108,36 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) #include -#include -#include -#include +#endif /* CONFIG_SMP */ -#include -#include +/* + * hweightN: returns the hamming weight (i.e. the number + * of bits set) of a N-bit word + */ -#include +static inline unsigned int hweight32(unsigned int w) +{ + unsigned int res; -#include -#include -#include + __asm__ ("%0.l = ONES %0;" + "%0 = %0.l (Z);" + : "=d" (res) : "d" (w)); + return res; +} -#endif /* CONFIG_SMP */ +static inline unsigned int hweight64(__u64 w) +{ + return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); +} + +static inline unsigned int hweight16(unsigned int w) +{ + return hweight32(w & 0xffff); +} + +static inline unsigned int hweight8(unsigned int w) +{ + return hweight32(w & 0xff); +} #endif /* _BLACKFIN_BITOPS_H */ -- cgit v1.2.3 From 0ea19c64e30f3688d1a890217c382db2b0ca411c Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Wed, 23 Dec 2009 07:01:43 +0000 Subject: Blackfin: update defconfigs Signed-off-by: Sonic Zhang Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/configs/BF518F-EZBRD_defconfig | 270 +++++++++++++------- arch/blackfin/configs/BF526-EZBRD_defconfig | 336 +++++++++++++++---------- arch/blackfin/configs/BF527-EZKIT_defconfig | 344 ++++++++++++++++---------- arch/blackfin/configs/BF533-EZKIT_defconfig | 250 +++++++++++-------- arch/blackfin/configs/BF533-STAMP_defconfig | 295 +++++++++++++--------- arch/blackfin/configs/BF537-STAMP_defconfig | 332 +++++++++++++++---------- arch/blackfin/configs/BF538-EZKIT_defconfig | 291 ++++++++++++++-------- arch/blackfin/configs/BF548-EZKIT_defconfig | 108 +++++--- arch/blackfin/configs/BF561-ACVILON_defconfig | 15 +- arch/blackfin/configs/BF561-EZKIT_defconfig | 82 +++--- arch/blackfin/configs/H8606_defconfig | 7 - arch/blackfin/configs/PNAV-10_defconfig | 295 +++++++++++++--------- 12 files changed, 1608 insertions(+), 1017 deletions(-) diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig index e31559419817..cf7c9bc94f13 100644 --- a/arch/blackfin/configs/BF518F-EZBRD_defconfig +++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig @@ -1,22 +1,27 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.28.10 -# Thu May 21 05:50:01 2009 +# Linux kernel version: 2.6.32.2 # # CONFIG_MMU is not set # CONFIG_FPU is not set CONFIG_RWSEM_GENERIC_SPINLOCK=y # 
CONFIG_RWSEM_XCHGADD_ALGORITHM is not set CONFIG_BLACKFIN=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_BUG=y CONFIG_ZONE_DMA=y CONFIG_GENERIC_FIND_NEXT_BIT=y -CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_GENERIC_GPIO=y CONFIG_FORCE_MAX_ZONEORDER=14 CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -26,22 +31,41 @@ CONFIG_BROKEN_ON_SMP=y CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y # CONFIG_POSIX_MQUEUE is not set # CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_TASKSTATS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set # CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set # CONFIG_SYSFS_DEPRECATED_V2 is not set # CONFIG_RELAY is not set # CONFIG_NAMESPACES is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_ANON_INODES=y @@ -62,6 +86,10 @@ CONFIG_EPOLL=y # CONFIG_TIMERFD is not set # CONFIG_EVENTFD is not set # CONFIG_AIO is not set + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y CONFIG_COMPAT_BRK=y CONFIG_SLAB=y @@ -69,11 +97,15 @@ CONFIG_SLAB=y # CONFIG_SLOB is not set CONFIG_MMAP_ALLOW_UNINITIALIZED=y # CONFIG_PROFILING is not set -# CONFIG_MARKERS is not set CONFIG_HAVE_OPROFILE=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_SLOW_WORK is not set # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y -CONFIG_TINY_SHMEM=y CONFIG_BASE_SMALL=0 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -81,11 +113,8 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set +# CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_INTEGRITY is not set @@ -101,7 +130,6 @@ CONFIG_IOSCHED_NOOP=y # CONFIG_DEFAULT_CFQ is not set CONFIG_DEFAULT_NOOP=y CONFIG_DEFAULT_IOSCHED="noop" -CONFIG_CLASSIC_RCU=y # CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set @@ -132,15 +160,15 @@ CONFIG_BF518=y # CONFIG_BF537 is not set # CONFIG_BF538 is not set # CONFIG_BF539 is not set -# CONFIG_BF542 is not set +# CONFIG_BF542_std is not set # CONFIG_BF542M is not set -# CONFIG_BF544 is not set +# CONFIG_BF544_std is not set # CONFIG_BF544M is not set -# CONFIG_BF547 is not set +# CONFIG_BF547_std is not set # CONFIG_BF547M is not set -# CONFIG_BF548 is not set +# CONFIG_BF548_std is not set # CONFIG_BF548M is not set -# CONFIG_BF549 is not set +# CONFIG_BF549_std is not set # CONFIG_BF549M is not set # CONFIG_BF561 is not set CONFIG_BF_REV_MIN=0 @@ -154,8 +182,8 @@ 
CONFIG_BF_REV_0_0=y # CONFIG_BF_REV_0_6 is not set # CONFIG_BF_REV_ANY is not set # CONFIG_BF_REV_NONE is not set -CONFIG_BF51x=y CONFIG_MEM_MT48LC32M8A2_75=y +CONFIG_BF51x=y CONFIG_BFIN518F_EZBRD=y # @@ -313,7 +341,6 @@ CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_RESOURCES_64BIT is not set # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=1 CONFIG_VIRT_TO_BUS=y @@ -322,16 +349,18 @@ CONFIG_BFIN_GPTIMERS=m # CONFIG_DMA_UNCACHED_4M is not set # CONFIG_DMA_UNCACHED_2M is not set CONFIG_DMA_UNCACHED_1M=y +# CONFIG_DMA_UNCACHED_512K is not set +# CONFIG_DMA_UNCACHED_256K is not set +# CONFIG_DMA_UNCACHED_128K is not set # CONFIG_DMA_UNCACHED_NONE is not set # # Cache Support # CONFIG_BFIN_ICACHE=y -# CONFIG_BFIN_ICACHE_LOCK is not set +CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_DCACHE=y # CONFIG_BFIN_DCACHE_BANKA is not set -CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_EXTMEM_DCACHEABLE=y CONFIG_BFIN_EXTMEM_WRITEBACK=y # CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set @@ -342,7 +371,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y # CONFIG_MPU is not set # -# Asynchonous Memory Configuration +# Asynchronous Memory Configuration # # @@ -398,11 +427,6 @@ CONFIG_NET=y CONFIG_PACKET=y # CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y -CONFIG_XFRM=y -# CONFIG_XFRM_USER is not set -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -# CONFIG_XFRM_STATISTICS is not set # CONFIG_NET_KEY is not set CONFIG_INET=y # CONFIG_IP_MULTICAST is not set @@ -426,7 +450,6 @@ CONFIG_IP_PNP=y # CONFIG_INET_XFRM_MODE_BEET is not set # CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set -CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" @@ -437,6 +460,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_NETFILTER is not set # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set @@ -450,7 +474,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing @@ -461,13 +488,8 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set -# CONFIG_PHONET is not set -CONFIG_WIRELESS=y -# CONFIG_CFG80211 is not set -CONFIG_WIRELESS_OLD_REGULATORY=y -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -488,6 +510,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_CONNECTOR is not set CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set # CONFIG_MTD_CONCAT is not set CONFIG_MTD_PARTITIONS=y # CONFIG_MTD_REDBOOT_PARTS is not set @@ -545,6 +568,7 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y # # CONFIG_MTD_DATAFLASH is not set # CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -559,6 +583,11 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y # CONFIG_MTD_NAND is not set # CONFIG_MTD_ONENAND is not set +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + # # UBI - Unsorted block images # @@ -576,10 +605,20 @@ CONFIG_BLK_DEV_RAM_SIZE=4096 # CONFIG_ATA_OVER_ETH is not set # CONFIG_BLK_DEV_HD is not 
set CONFIG_MISC_DEVICES=y -# CONFIG_EEPROM_93CX6 is not set +# CONFIG_AD525X_DPOT is not set # CONFIG_ICS932S401 is not set # CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_ISL29003 is not set # CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -614,6 +653,9 @@ CONFIG_PHYLIB=y # CONFIG_BROADCOM_PHY is not set # CONFIG_ICPLUS_PHY is not set # CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set # CONFIG_FIXED_PHY is not set # CONFIG_MDIO_BITBANG is not set CONFIG_NET_ETHERNET=y @@ -622,10 +664,14 @@ CONFIG_BFIN_MAC=y CONFIG_BFIN_TX_DESC_NUM=10 CONFIG_BFIN_RX_DESC_NUM=20 # CONFIG_BFIN_MAC_RMII is not set +CONFIG_BFIN_MAC_USE_HWSTAMP=y # CONFIG_SMC91X is not set -# CONFIG_SMSC911X is not set # CONFIG_DM9000 is not set # CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_ADF702X is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set @@ -634,15 +680,16 @@ CONFIG_BFIN_RX_DESC_NUM=20 # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set +# CONFIG_WLAN is not set # -# Wireless LAN +# Enable WiMAX (Networking options) to see the WiMAX drivers # -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set -# CONFIG_IWLWIFI_LEDS is not set # CONFIG_WAN is not set # CONFIG_PPP is not set # CONFIG_SLIP is not set @@ -677,7 +724,10 @@ CONFIG_INPUT=y # CONFIG_INPUT_TOUCHSCREEN is not set CONFIG_INPUT_MISC=y # CONFIG_INPUT_UINPUT is not set -# CONFIG_CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_PCF8574 is not set # # Hardware I/O ports @@ -688,16 +738,13 @@ CONFIG_INPUT_MISC=y # # Character devices # -# CONFIG_AD9960 is not set CONFIG_BFIN_DMA_INTERFACE=m # CONFIG_BFIN_PPI is not set # CONFIG_BFIN_PPIFCD is not set # CONFIG_BFIN_SIMPLE_TIMER is not set # CONFIG_BFIN_SPI_ADC is not set # CONFIG_BFIN_SPORT is not set -# CONFIG_BFIN_TIMER_LATENCY is not set # CONFIG_BFIN_TWI_LCD is not set -CONFIG_SIMPLE_GPIO=m CONFIG_VT=y CONFIG_CONSOLE_TRANSLATIONS=y CONFIG_VT_CONSOLE=y @@ -715,6 +762,7 @@ CONFIG_BFIN_JTAG_COMM=m # # Non-8250 serial port support # +# CONFIG_SERIAL_MAX3100 is not set CONFIG_SERIAL_BFIN=y CONFIG_SERIAL_BFIN_CONSOLE=y CONFIG_SERIAL_BFIN_DMA=y @@ -726,12 +774,10 @@ CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_BFIN_SPORT is not set CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set - -# -# CAN, the car bus and industrial fieldbus -# -# CONFIG_CAN4LINUX is not set +CONFIG_BFIN_OTP=y +# CONFIG_BFIN_OTP_WRITE_ENABLE is not set # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set # CONFIG_R3964 is not set @@ -739,6 +785,7 @@ CONFIG_UNIX98_PTYS=y # CONFIG_TCG_TPM is not set CONFIG_I2C=y CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_HELPER_AUTO=y @@ -771,14 +818,6 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # Miscellaneous I2C Chip 
support # # CONFIG_DS1682 is not set -# CONFIG_EEPROM_AT24 is not set -# CONFIG_SENSORS_AD5252 is not set -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_SENSORS_PCF8574 is not set -# CONFIG_PCF8575 is not set -# CONFIG_SENSORS_PCA9539 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set @@ -795,13 +834,18 @@ CONFIG_SPI_BFIN=y # CONFIG_SPI_BFIN_LOCK is not set # CONFIG_SPI_BFIN_SPORT is not set # CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set # # SPI Protocol Masters # -# CONFIG_SPI_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y CONFIG_GPIOLIB=y # CONFIG_DEBUG_GPIO is not set @@ -817,6 +861,7 @@ CONFIG_GPIO_SYSFS=y # CONFIG_GPIO_MAX732X is not set # CONFIG_GPIO_PCA953X is not set # CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_ADP5588 is not set # # PCI GPIO expanders: @@ -827,11 +872,15 @@ CONFIG_GPIO_SYSFS=y # # CONFIG_GPIO_MAX7301 is not set # CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set CONFIG_WATCHDOG=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -853,28 +902,20 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set +# CONFIG_TPS65010 is not set +# CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set # CONFIG_PMIC_DA903X is not set # CONFIG_PMIC_ADP5520 is not set # CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set # CONFIG_REGULATOR is not set - -# -# Multimedia devices -# - -# -# Multimedia core support -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_VIDEO_MEDIA is not set - -# -# Multimedia drivers -# -# CONFIG_DAB is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support @@ -912,10 +953,11 @@ CONFIG_MMC_BLOCK_BOUNCE=y # MMC/SD/SDIO Host Controller Drivers # # CONFIG_MMC_SDHCI is not set +# CONFIG_MMC_AT91 is not set +# CONFIG_MMC_ATMELMCI is not set +# CONFIG_MMC_SPI is not set CONFIG_SDH_BFIN=m CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND=y -# CONFIG_SDH_BFIN_ENABLE_SDIO_IRQ is not set -# CONFIG_MMC_SPI is not set # CONFIG_MEMSTICK is not set # CONFIG_NEW_LEDS is not set # CONFIG_ACCESSIBILITY is not set @@ -950,6 +992,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_S35390A is not set # CONFIG_RTC_DRV_FM3130 is not set # CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set # # SPI RTC drivers @@ -961,6 +1004,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_R9701 is not set # CONFIG_RTC_DRV_RS5C348 is not set # CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set # # Platform RTC drivers @@ -981,9 +1025,20 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_BFIN=y # CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set + +# +# TI VLYNQ +# # CONFIG_STAGING is not set +# +# Firmware Drivers +# +# CONFIG_FIRMWARE_MEMMAP is not set +# CONFIG_SIGMA is not set + # # File systems # @@ -994,9 +1049,13 @@ CONFIG_EXT2_FS=m # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set # CONFIG_FS_POSIX_ACL is not set 
-CONFIG_FILE_LOCKING=y # CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y # CONFIG_DNOTIFY is not set CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y @@ -1005,6 +1064,11 @@ CONFIG_INOTIFY_USER=y # CONFIG_AUTOFS4_FS is not set # CONFIG_FUSE_FS is not set +# +# Caches +# +# CONFIG_FSCACHE is not set + # # CD-ROM/DVD Filesystems # @@ -1027,13 +1091,9 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" CONFIG_PROC_FS=y CONFIG_PROC_SYSCTL=y CONFIG_SYSFS=y -# CONFIG_TMPFS is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set @@ -1042,8 +1102,8 @@ CONFIG_SYSFS=y # CONFIG_BFS_FS is not set # CONFIG_EFS_FS is not set # CONFIG_JFFS2_FS is not set -# CONFIG_YAFFS_FS is not set # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_OMFS_FS is not set @@ -1062,7 +1122,6 @@ CONFIG_LOCKD=m CONFIG_LOCKD_V4=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=m -# CONFIG_SUNRPC_REGISTER_V4 is not set # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set CONFIG_SMB_FS=m @@ -1127,14 +1186,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 # CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set @@ -1142,31 +1206,39 @@ CONFIG_SCHED_DEBUG=y # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SPINLOCK is not set # CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set CONFIG_DEBUG_BUGVERBOSE=y CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_NOMMU_REGIONS is not set # CONFIG_DEBUG_WRITECOUNT is not set # CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set # CONFIG_FRAME_POINTER is not set # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_BACKTRACE_SELF_TEST is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_FAULT_INJECTION is not set - -# -# Tracers -# -# CONFIG_SCHED_TRACER is not set -# CONFIG_CONTEXT_SWITCH_TRACER is not set -# CONFIG_BOOT_TRACER is not set -# CONFIG_DYNAMIC_PRINTK_DEBUG is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_BRANCH_PROFILE_NONE is not set +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB 
is not set @@ -1191,6 +1263,7 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y CONFIG_EARLY_PRINTK=y CONFIG_CPLB_INFO=y CONFIG_ACCESS_CHECK=y +# CONFIG_BFIN_ISRAM_SELF_TEST is not set # # Security options @@ -1199,14 +1272,14 @@ CONFIG_ACCESS_CHECK=y CONFIG_SECURITY=y # CONFIG_SECURITYFS is not set # CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set # CONFIG_SECURITY_FILE_CAPABILITIES is not set -CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 +# CONFIG_SECURITY_TOMOYO is not set CONFIG_CRYPTO=y # # Crypto core or helper # -# CONFIG_CRYPTO_FIPS is not set # CONFIG_CRYPTO_MANAGER is not set # CONFIG_CRYPTO_MANAGER2 is not set # CONFIG_CRYPTO_GF128MUL is not set @@ -1238,11 +1311,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_HMAC is not set # CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set # # Digest # # CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set # CONFIG_CRYPTO_MD4 is not set # CONFIG_CRYPTO_MD5 is not set # CONFIG_CRYPTO_MICHAEL_MIC is not set @@ -1279,6 +1354,7 @@ CONFIG_CRYPTO=y # Compression # # CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set # CONFIG_CRYPTO_LZO is not set # @@ -1286,11 +1362,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set # # Library routines # CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m # CONFIG_CRC16 is not set # CONFIG_CRC_T10DIF is not set @@ -1299,6 +1377,8 @@ CONFIG_CRC32=y # CONFIG_CRC7 is not set # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y +CONFIG_DECOMPRESS_GZIP=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig index 075e0fdcb399..31c2a6db6ec5 100644 --- a/arch/blackfin/configs/BF526-EZBRD_defconfig +++ b/arch/blackfin/configs/BF526-EZBRD_defconfig @@ -1,22 +1,27 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.28.10 -# Thu May 21 05:50:01 2009 +# Linux kernel version: 2.6.32.2 # # CONFIG_MMU is not set # CONFIG_FPU is not set CONFIG_RWSEM_GENERIC_SPINLOCK=y # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set CONFIG_BLACKFIN=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_BUG=y CONFIG_ZONE_DMA=y CONFIG_GENERIC_FIND_NEXT_BIT=y -CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_GENERIC_GPIO=y CONFIG_FORCE_MAX_ZONEORDER=14 CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -26,22 +31,41 @@ CONFIG_BROKEN_ON_SMP=y CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y # CONFIG_POSIX_MQUEUE is not set # CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_TASKSTATS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set # CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set # CONFIG_SYSFS_DEPRECATED_V2 is not set # 
CONFIG_RELAY is not set # CONFIG_NAMESPACES is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_ANON_INODES=y @@ -62,6 +86,10 @@ CONFIG_EPOLL=y # CONFIG_TIMERFD is not set # CONFIG_EVENTFD is not set # CONFIG_AIO is not set + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y CONFIG_COMPAT_BRK=y CONFIG_SLAB=y @@ -69,11 +97,15 @@ CONFIG_SLAB=y # CONFIG_SLOB is not set CONFIG_MMAP_ALLOW_UNINITIALIZED=y # CONFIG_PROFILING is not set -# CONFIG_MARKERS is not set CONFIG_HAVE_OPROFILE=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_SLOW_WORK is not set # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y -CONFIG_TINY_SHMEM=y CONFIG_BASE_SMALL=0 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -81,11 +113,8 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set +# CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_INTEGRITY is not set @@ -101,7 +130,6 @@ CONFIG_IOSCHED_NOOP=y # CONFIG_DEFAULT_CFQ is not set CONFIG_DEFAULT_NOOP=y CONFIG_DEFAULT_IOSCHED="noop" -CONFIG_CLASSIC_RCU=y # CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set @@ -132,15 +160,15 @@ CONFIG_BF526=y # CONFIG_BF537 is not set # CONFIG_BF538 is not set # CONFIG_BF539 is not set -# CONFIG_BF542 is not set +# CONFIG_BF542_std is not set # CONFIG_BF542M is not set -# CONFIG_BF544 is not set +# CONFIG_BF544_std is not set # CONFIG_BF544M is not set -# CONFIG_BF547 is not set +# CONFIG_BF547_std is not set # CONFIG_BF547M is not set -# CONFIG_BF548 is not set +# CONFIG_BF548_std is not set # CONFIG_BF548M is not set -# CONFIG_BF549 is not set +# CONFIG_BF549_std is not set # CONFIG_BF549M is not set # CONFIG_BF561 is not set CONFIG_BF_REV_MIN=0 @@ -154,8 +182,7 @@ CONFIG_BF_REV_0_0=y # CONFIG_BF_REV_0_6 is not set # CONFIG_BF_REV_ANY is not set # CONFIG_BF_REV_NONE is not set -CONFIG_BF52x=y -CONFIG_MEM_MT48LC32M16A2TG_75=y +CONFIG_MEM_MT48H32M16LFCJ_75=y CONFIG_IRQ_PLL_WAKEUP=7 CONFIG_IRQ_DMA0_ERROR=7 CONFIG_IRQ_DMAR0_BLK=7 @@ -200,7 +227,9 @@ CONFIG_IRQ_MEM_DMA1=13 CONFIG_IRQ_WATCH=13 CONFIG_IRQ_PORTF_INTA=13 CONFIG_IRQ_PORTF_INTB=13 +CONFIG_BF52x=y # CONFIG_BFIN527_EZKIT is not set +# CONFIG_BFIN527_EZKIT_V2 is not set # CONFIG_BFIN527_BLUETECHNIX_CM is not set CONFIG_BFIN526_EZBRD=y @@ -318,7 +347,6 @@ CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_RESOURCES_64BIT is not set # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=1 CONFIG_VIRT_TO_BUS=y @@ -327,16 +355,18 @@ CONFIG_BFIN_GPTIMERS=m # CONFIG_DMA_UNCACHED_4M is not set # CONFIG_DMA_UNCACHED_2M is not set CONFIG_DMA_UNCACHED_1M=y +# CONFIG_DMA_UNCACHED_512K is not set +# CONFIG_DMA_UNCACHED_256K is not set +# CONFIG_DMA_UNCACHED_128K is not set # CONFIG_DMA_UNCACHED_NONE is not set # # Cache Support # CONFIG_BFIN_ICACHE=y -# CONFIG_BFIN_ICACHE_LOCK is not set +CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_DCACHE=y # CONFIG_BFIN_DCACHE_BANKA is not set -CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_EXTMEM_DCACHEABLE=y CONFIG_BFIN_EXTMEM_WRITEBACK=y # CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set @@ -346,6 +376,10 @@ 
CONFIG_BFIN_EXTMEM_WRITEBACK=y # # CONFIG_MPU is not set +# +# Asynchronous Memory Configuration +# + # # EBIU_AMGCTL Global Control # @@ -399,11 +433,6 @@ CONFIG_NET=y CONFIG_PACKET=y # CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y -CONFIG_XFRM=y -# CONFIG_XFRM_USER is not set -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -# CONFIG_XFRM_STATISTICS is not set # CONFIG_NET_KEY is not set CONFIG_INET=y # CONFIG_IP_MULTICAST is not set @@ -427,7 +456,6 @@ CONFIG_IP_PNP=y # CONFIG_INET_XFRM_MODE_BEET is not set # CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set -CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" @@ -438,6 +466,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_NETFILTER is not set # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set @@ -451,7 +480,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing @@ -462,13 +494,8 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set -# CONFIG_PHONET is not set -CONFIG_WIRELESS=y -# CONFIG_CFG80211 is not set -CONFIG_WIRELESS_OLD_REGULATORY=y -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -489,6 +516,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_CONNECTOR is not set CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set # CONFIG_MTD_CONCAT is not set CONFIG_MTD_PARTITIONS=y # CONFIG_MTD_REDBOOT_PARTS is not set @@ -549,6 +577,7 @@ CONFIG_MTD_PHYSMAP=y # CONFIG_MTD_DATAFLASH is not set CONFIG_MTD_M25P80=y CONFIG_M25PXX_USE_FAST_READ=y +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -564,11 +593,6 @@ CONFIG_MTD_NAND=m # CONFIG_MTD_NAND_VERIFY_WRITE is not set # CONFIG_MTD_NAND_ECC_SMC is not set # CONFIG_MTD_NAND_MUSEUM_IDS is not set -CONFIG_MTD_NAND_BFIN=m -CONFIG_BFIN_NAND_BASE=0x20212000 -CONFIG_BFIN_NAND_CLE=2 -CONFIG_BFIN_NAND_ALE=1 -CONFIG_BFIN_NAND_READY=3 CONFIG_MTD_NAND_IDS=m # CONFIG_MTD_NAND_BF5XX is not set # CONFIG_MTD_NAND_DISKONCHIP is not set @@ -577,6 +601,11 @@ CONFIG_MTD_NAND_IDS=m # CONFIG_MTD_ALAUDA is not set # CONFIG_MTD_ONENAND is not set +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + # # UBI - Unsorted block images # @@ -595,10 +624,20 @@ CONFIG_BLK_DEV_RAM_SIZE=4096 # CONFIG_ATA_OVER_ETH is not set # CONFIG_BLK_DEV_HD is not set CONFIG_MISC_DEVICES=y -# CONFIG_EEPROM_93CX6 is not set +# CONFIG_AD525X_DPOT is not set # CONFIG_ICS932S401 is not set # CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_ISL29003 is not set # CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -622,10 +661,6 @@ CONFIG_BLK_DEV_SR=m # CONFIG_BLK_DEV_SR_VENDOR is not set # CONFIG_CHR_DEV_SG is not set # CONFIG_CHR_DEV_SCH is not set - -# -# Some SCSI devices (e.g. 
CD jukebox) support multiple LUNs -# # CONFIG_SCSI_MULTI_LUN is not set # CONFIG_SCSI_CONSTANTS is not set # CONFIG_SCSI_LOGGING is not set @@ -642,6 +677,7 @@ CONFIG_SCSI_WAIT_SCAN=m # CONFIG_SCSI_SRP_ATTRS is not set # CONFIG_SCSI_LOWLEVEL is not set # CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set # CONFIG_ATA is not set # CONFIG_MD is not set CONFIG_NETDEVICES=y @@ -666,6 +702,9 @@ CONFIG_PHYLIB=y # CONFIG_BROADCOM_PHY is not set # CONFIG_ICPLUS_PHY is not set # CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set # CONFIG_FIXED_PHY is not set # CONFIG_MDIO_BITBANG is not set CONFIG_NET_ETHERNET=y @@ -675,9 +714,12 @@ CONFIG_BFIN_TX_DESC_NUM=10 CONFIG_BFIN_RX_DESC_NUM=20 CONFIG_BFIN_MAC_RMII=y # CONFIG_SMC91X is not set -# CONFIG_SMSC911X is not set # CONFIG_DM9000 is not set # CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_ADF702X is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set @@ -686,15 +728,16 @@ CONFIG_BFIN_MAC_RMII=y # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set +# CONFIG_WLAN is not set # -# Wireless LAN +# Enable WiMAX (Networking options) to see the WiMAX drivers # -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set -# CONFIG_IWLWIFI_LEDS is not set # # USB Network Adapters @@ -744,7 +787,11 @@ CONFIG_INPUT_MISC=y # CONFIG_INPUT_YEALINK is not set # CONFIG_INPUT_CM109 is not set # CONFIG_INPUT_UINPUT is not set -# CONFIG_CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_BFIN_ROTARY is not set +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_PCF8574 is not set # # Hardware I/O ports @@ -755,16 +802,13 @@ CONFIG_INPUT_MISC=y # # Character devices # -# CONFIG_AD9960 is not set CONFIG_BFIN_DMA_INTERFACE=m # CONFIG_BFIN_PPI is not set # CONFIG_BFIN_PPIFCD is not set # CONFIG_BFIN_SIMPLE_TIMER is not set # CONFIG_BFIN_SPI_ADC is not set # CONFIG_BFIN_SPORT is not set -# CONFIG_BFIN_TIMER_LATENCY is not set # CONFIG_BFIN_TWI_LCD is not set -CONFIG_SIMPLE_GPIO=m CONFIG_VT=y CONFIG_CONSOLE_TRANSLATIONS=y CONFIG_VT_CONSOLE=y @@ -782,6 +826,7 @@ CONFIG_BFIN_JTAG_COMM=m # # Non-8250 serial port support # +# CONFIG_SERIAL_MAX3100 is not set CONFIG_SERIAL_BFIN=y CONFIG_SERIAL_BFIN_CONSOLE=y CONFIG_SERIAL_BFIN_DMA=y @@ -793,14 +838,10 @@ CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_BFIN_SPORT is not set CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set CONFIG_BFIN_OTP=y # CONFIG_BFIN_OTP_WRITE_ENABLE is not set - -# -# CAN, the car bus and industrial fieldbus -# -# CONFIG_CAN4LINUX is not set # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set # CONFIG_R3964 is not set @@ -808,6 +849,7 @@ CONFIG_BFIN_OTP=y # CONFIG_TCG_TPM is not set CONFIG_I2C=y CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m CONFIG_I2C_HELPER_AUTO=y @@ -841,14 +883,6 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_EEPROM_AT24 is not set -# CONFIG_SENSORS_AD5252 is not set -# CONFIG_EEPROM_LEGACY is not 
set -# CONFIG_SENSORS_PCF8574 is not set -# CONFIG_PCF8575 is not set -# CONFIG_SENSORS_PCA9539 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set @@ -865,13 +899,18 @@ CONFIG_SPI_BFIN=y # CONFIG_SPI_BFIN_LOCK is not set # CONFIG_SPI_BFIN_SPORT is not set # CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set # # SPI Protocol Masters # -# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y CONFIG_GPIOLIB=y # CONFIG_DEBUG_GPIO is not set @@ -887,6 +926,7 @@ CONFIG_GPIO_SYSFS=y # CONFIG_GPIO_MAX732X is not set # CONFIG_GPIO_PCA953X is not set # CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_ADP5588 is not set # # PCI GPIO expanders: @@ -897,11 +937,20 @@ CONFIG_GPIO_SYSFS=y # # CONFIG_GPIO_MAX7301 is not set # CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set CONFIG_HWMON=y # CONFIG_HWMON_VID is not set -# CONFIG_SENSORS_AD5252 is not set +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# # CONFIG_SENSORS_AD7414 is not set # CONFIG_SENSORS_AD7418 is not set # CONFIG_SENSORS_ADCXX is not set @@ -914,11 +963,13 @@ CONFIG_HWMON=y # CONFIG_SENSORS_ADT7462 is not set # CONFIG_SENSORS_ADT7470 is not set # CONFIG_SENSORS_ADT7473 is not set +# CONFIG_SENSORS_ADT7475 is not set # CONFIG_SENSORS_ATXP1 is not set # CONFIG_SENSORS_DS1621 is not set # CONFIG_SENSORS_F71805F is not set # CONFIG_SENSORS_F71882FG is not set # CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_G760A is not set # CONFIG_SENSORS_GL518SM is not set # CONFIG_SENSORS_GL520SM is not set # CONFIG_SENSORS_IT87 is not set @@ -934,17 +985,24 @@ CONFIG_HWMON=y # CONFIG_SENSORS_LM90 is not set # CONFIG_SENSORS_LM92 is not set # CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LM95241 is not set # CONFIG_SENSORS_MAX1111 is not set # CONFIG_SENSORS_MAX1619 is not set # CONFIG_SENSORS_MAX6650 is not set # CONFIG_SENSORS_PC87360 is not set # CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_SENSORS_SHT15 is not set # CONFIG_SENSORS_DME1737 is not set # CONFIG_SENSORS_SMSC47M1 is not set # CONFIG_SENSORS_SMSC47M192 is not set # CONFIG_SENSORS_SMSC47B397 is not set # CONFIG_SENSORS_ADS7828 is not set # CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set # CONFIG_SENSORS_VT1211 is not set # CONFIG_SENSORS_W83781D is not set # CONFIG_SENSORS_W83791D is not set @@ -954,9 +1012,8 @@ CONFIG_HWMON=y # CONFIG_SENSORS_W83L786NG is not set # CONFIG_SENSORS_W83627HF is not set # CONFIG_SENSORS_W83627EHF is not set -# CONFIG_HWMON_DEBUG_CHIP is not set +# CONFIG_SENSORS_LIS3_SPI is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set CONFIG_WATCHDOG=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -983,28 +1040,20 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set +# CONFIG_TPS65010 is not set +# CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set # CONFIG_PMIC_DA903X is not set # CONFIG_PMIC_ADP5520 is not set # CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is 
not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set # CONFIG_REGULATOR is not set - -# -# Multimedia devices -# - -# -# Multimedia core support -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_VIDEO_MEDIA is not set - -# -# Multimedia drivers -# -# CONFIG_DAB is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support @@ -1026,7 +1075,6 @@ CONFIG_DUMMY_CONSOLE=y # CONFIG_SOUND is not set CONFIG_HID_SUPPORT=y CONFIG_HID=y -# CONFIG_HID_DEBUG is not set # CONFIG_HIDRAW is not set # @@ -1039,30 +1087,35 @@ CONFIG_USB_HID=y # # Special HID drivers # -CONFIG_HID_COMPAT=y CONFIG_HID_A4TECH=y CONFIG_HID_APPLE=y CONFIG_HID_BELKIN=y -CONFIG_HID_BRIGHT=y CONFIG_HID_CHERRY=y CONFIG_HID_CHICONY=y CONFIG_HID_CYPRESS=y -CONFIG_HID_DELL=y +# CONFIG_HID_DRAGONRISE is not set CONFIG_HID_EZKEY=y +# CONFIG_HID_KYE is not set CONFIG_HID_GYRATION=y +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set CONFIG_HID_LOGITECH=y # CONFIG_LOGITECH_FF is not set # CONFIG_LOGIRUMBLEPAD2_FF is not set CONFIG_HID_MICROSOFT=y CONFIG_HID_MONTEREY=y +# CONFIG_HID_NTRIG is not set CONFIG_HID_PANTHERLORD=y # CONFIG_PANTHERLORD_FF is not set CONFIG_HID_PETALYNX=y CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y -CONFIG_THRUSTMASTER_FF=m -CONFIG_ZEROPLUS_FF=m +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_ZEROPLUS is not set CONFIG_USB_SUPPORT=y CONFIG_USB_ARCH_HAS_HCD=y # CONFIG_USB_ARCH_HAS_OHCI is not set @@ -1088,6 +1141,7 @@ CONFIG_USB_MON=y # USB Host Controller Drivers # # CONFIG_USB_C67X00_HCD is not set +# CONFIG_USB_OXU210HP_HCD is not set # CONFIG_USB_ISP116X_HCD is not set # CONFIG_USB_ISP1760_HCD is not set # CONFIG_USB_ISP1362_HCD is not set @@ -1118,18 +1172,17 @@ CONFIG_USB_INVENTRA_DMA=y # CONFIG_USB_TMC is not set # -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may # # -# see USB_STORAGE Help for more information +# also be needed; see USB_STORAGE Help for more info # CONFIG_USB_STORAGE=y # CONFIG_USB_STORAGE_DEBUG is not set # CONFIG_USB_STORAGE_DATAFAB is not set # CONFIG_USB_STORAGE_FREECOM is not set # CONFIG_USB_STORAGE_ISD200 is not set -# CONFIG_USB_STORAGE_DPCM is not set # CONFIG_USB_STORAGE_USBAT is not set # CONFIG_USB_STORAGE_SDDR09 is not set # CONFIG_USB_STORAGE_SDDR55 is not set @@ -1165,7 +1218,6 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_LED is not set # CONFIG_USB_CYPRESS_CY7C63 is not set # CONFIG_USB_CYTHERM is not set -# CONFIG_USB_PHIDGET is not set # CONFIG_USB_IDMOUSE is not set # CONFIG_USB_FTDI_ELAN is not set # CONFIG_USB_APPLEDISPLAY is not set @@ -1177,6 +1229,13 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_ISIGHTFW is not set # CONFIG_USB_VST is not set # CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# +CONFIG_USB_OTG_UTILS=y +# CONFIG_USB_GPIO_VBUS is not set +CONFIG_NOP_USB_XCEIV=y # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set # CONFIG_NEW_LEDS is not set @@ -1212,6 +1271,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_S35390A is not set # CONFIG_RTC_DRV_FM3130 is not set # CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set # # SPI RTC drivers @@ -1223,6 +1283,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_R9701 is not set # CONFIG_RTC_DRV_RS5C348 is not set # CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set # # Platform RTC drivers @@ 
-1243,9 +1304,20 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_BFIN=y # CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set + +# +# TI VLYNQ +# # CONFIG_STAGING is not set +# +# Firmware Drivers +# +# CONFIG_FIRMWARE_MEMMAP is not set +# CONFIG_SIGMA is not set + # # File systems # @@ -1256,9 +1328,13 @@ CONFIG_EXT2_FS=m # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set # CONFIG_FS_POSIX_ACL is not set -CONFIG_FILE_LOCKING=y # CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y # CONFIG_DNOTIFY is not set CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y @@ -1267,6 +1343,11 @@ CONFIG_INOTIFY_USER=y # CONFIG_AUTOFS4_FS is not set # CONFIG_FUSE_FS is not set +# +# Caches +# +# CONFIG_FSCACHE is not set + # # CD-ROM/DVD Filesystems # @@ -1291,13 +1372,9 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" CONFIG_PROC_FS=y CONFIG_PROC_SYSCTL=y CONFIG_SYSFS=y -# CONFIG_TMPFS is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set @@ -1316,17 +1393,8 @@ CONFIG_JFFS2_ZLIB=y # CONFIG_JFFS2_LZO is not set CONFIG_JFFS2_RTIME=y # CONFIG_JFFS2_RUBIN is not set -CONFIG_YAFFS_FS=m -CONFIG_YAFFS_YAFFS1=y -# CONFIG_YAFFS_9BYTE_TAGS is not set -# CONFIG_YAFFS_DOES_ECC is not set -CONFIG_YAFFS_YAFFS2=y -CONFIG_YAFFS_AUTO_YAFFS2=y -# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set -# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set -# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set -CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_OMFS_FS is not set @@ -1345,7 +1413,6 @@ CONFIG_LOCKD=m CONFIG_LOCKD_V4=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=m -# CONFIG_SUNRPC_REGISTER_V4 is not set # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set CONFIG_SMB_FS=m @@ -1360,7 +1427,7 @@ CONFIG_SMB_FS=m # # CONFIG_PARTITION_ADVANCED is not set CONFIG_MSDOS_PARTITION=y -CONFIG_NLS=m +CONFIG_NLS=y CONFIG_NLS_DEFAULT="iso8859-1" CONFIG_NLS_CODEPAGE_437=m # CONFIG_NLS_CODEPAGE_737 is not set @@ -1410,14 +1477,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 # CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set @@ -1425,31 +1497,39 @@ CONFIG_SCHED_DEBUG=y # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SPINLOCK is not set # CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set CONFIG_DEBUG_BUGVERBOSE=y CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_NOMMU_REGIONS is not set # CONFIG_DEBUG_WRITECOUNT is not set # CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_DEBUG_LIST 
is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set # CONFIG_FRAME_POINTER is not set # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_BACKTRACE_SELF_TEST is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_FAULT_INJECTION is not set - -# -# Tracers -# -# CONFIG_SCHED_TRACER is not set -# CONFIG_CONTEXT_SWITCH_TRACER is not set -# CONFIG_BOOT_TRACER is not set -# CONFIG_DYNAMIC_PRINTK_DEBUG is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_BRANCH_PROFILE_NONE is not set +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set @@ -1474,6 +1554,7 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y CONFIG_EARLY_PRINTK=y CONFIG_CPLB_INFO=y CONFIG_ACCESS_CHECK=y +# CONFIG_BFIN_ISRAM_SELF_TEST is not set # # Security options @@ -1482,15 +1563,15 @@ CONFIG_ACCESS_CHECK=y CONFIG_SECURITY=y # CONFIG_SECURITYFS is not set # CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set # CONFIG_SECURITY_FILE_CAPABILITIES is not set # CONFIG_SECURITY_ROOTPLUG is not set -CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 +# CONFIG_SECURITY_TOMOYO is not set CONFIG_CRYPTO=y # # Crypto core or helper # -# CONFIG_CRYPTO_FIPS is not set # CONFIG_CRYPTO_MANAGER is not set # CONFIG_CRYPTO_MANAGER2 is not set # CONFIG_CRYPTO_GF128MUL is not set @@ -1522,11 +1603,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_HMAC is not set # CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set # # Digest # # CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set # CONFIG_CRYPTO_MD4 is not set # CONFIG_CRYPTO_MD5 is not set # CONFIG_CRYPTO_MICHAEL_MIC is not set @@ -1563,6 +1646,7 @@ CONFIG_CRYPTO=y # Compression # # CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set # CONFIG_CRYPTO_LZO is not set # @@ -1570,11 +1654,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set # # Library routines # CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m # CONFIG_CRC16 is not set # CONFIG_CRC_T10DIF is not set @@ -1584,6 +1670,8 @@ CONFIG_CRC32=y # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=m +CONFIG_DECOMPRESS_GZIP=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig index 6d1a623fb149..edbb44d26bbf 100644 --- a/arch/blackfin/configs/BF527-EZKIT_defconfig +++ b/arch/blackfin/configs/BF527-EZKIT_defconfig @@ -1,22 +1,27 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.28.10 -# Thu May 21 05:50:01 2009 +# Linux kernel version: 2.6.32.2 # # CONFIG_MMU is not set # CONFIG_FPU is not set CONFIG_RWSEM_GENERIC_SPINLOCK=y # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set CONFIG_BLACKFIN=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_BUG=y CONFIG_ZONE_DMA=y CONFIG_GENERIC_FIND_NEXT_BIT=y -CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_GENERIC_GPIO=y CONFIG_FORCE_MAX_ZONEORDER=14 CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_LOCKDEP_SUPPORT=y 
+CONFIG_STACKTRACE_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -26,22 +31,41 @@ CONFIG_BROKEN_ON_SMP=y CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y # CONFIG_POSIX_MQUEUE is not set # CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_TASKSTATS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set # CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set # CONFIG_SYSFS_DEPRECATED_V2 is not set # CONFIG_RELAY is not set # CONFIG_NAMESPACES is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_ANON_INODES=y @@ -62,6 +86,10 @@ CONFIG_EPOLL=y # CONFIG_TIMERFD is not set # CONFIG_EVENTFD is not set # CONFIG_AIO is not set + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y CONFIG_COMPAT_BRK=y CONFIG_SLAB=y @@ -69,11 +97,15 @@ CONFIG_SLAB=y # CONFIG_SLOB is not set CONFIG_MMAP_ALLOW_UNINITIALIZED=y # CONFIG_PROFILING is not set -# CONFIG_MARKERS is not set CONFIG_HAVE_OPROFILE=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_SLOW_WORK is not set # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y -CONFIG_TINY_SHMEM=y CONFIG_BASE_SMALL=0 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -81,11 +113,8 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set +# CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_INTEGRITY is not set @@ -101,7 +130,6 @@ CONFIG_DEFAULT_AS=y # CONFIG_DEFAULT_CFQ is not set # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="anticipatory" -CONFIG_CLASSIC_RCU=y # CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set @@ -132,29 +160,28 @@ CONFIG_BF527=y # CONFIG_BF537 is not set # CONFIG_BF538 is not set # CONFIG_BF539 is not set -# CONFIG_BF542 is not set +# CONFIG_BF542_std is not set # CONFIG_BF542M is not set -# CONFIG_BF544 is not set +# CONFIG_BF544_std is not set # CONFIG_BF544M is not set -# CONFIG_BF547 is not set +# CONFIG_BF547_std is not set # CONFIG_BF547M is not set -# CONFIG_BF548 is not set +# CONFIG_BF548_std is not set # CONFIG_BF548M is not set -# CONFIG_BF549 is not set +# CONFIG_BF549_std is not set # CONFIG_BF549M is not set # CONFIG_BF561 is not set CONFIG_BF_REV_MIN=0 CONFIG_BF_REV_MAX=2 # CONFIG_BF_REV_0_0 is not set -# CONFIG_BF_REV_0_1 is not set -CONFIG_BF_REV_0_2=y +CONFIG_BF_REV_0_1=y +# CONFIG_BF_REV_0_2 is not set # CONFIG_BF_REV_0_3 is not set # CONFIG_BF_REV_0_4 is not set # CONFIG_BF_REV_0_5 is not set # CONFIG_BF_REV_0_6 is not set # CONFIG_BF_REV_ANY is not set # CONFIG_BF_REV_NONE is not set -CONFIG_BF52x=y 
CONFIG_MEM_MT48LC32M16A2TG_75=y CONFIG_IRQ_PLL_WAKEUP=7 CONFIG_IRQ_DMA0_ERROR=7 @@ -200,7 +227,9 @@ CONFIG_IRQ_MEM_DMA1=13 CONFIG_IRQ_WATCH=13 CONFIG_IRQ_PORTF_INTA=13 CONFIG_IRQ_PORTF_INTB=13 +CONFIG_BF52x=y CONFIG_BFIN527_EZKIT=y +# CONFIG_BFIN527_EZKIT_V2 is not set # CONFIG_BFIN527_BLUETECHNIX_CM is not set # CONFIG_BFIN526_EZBRD is not set @@ -318,7 +347,6 @@ CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_RESOURCES_64BIT is not set # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=1 CONFIG_VIRT_TO_BUS=y @@ -327,16 +355,18 @@ CONFIG_BFIN_GPTIMERS=y # CONFIG_DMA_UNCACHED_4M is not set # CONFIG_DMA_UNCACHED_2M is not set CONFIG_DMA_UNCACHED_1M=y +# CONFIG_DMA_UNCACHED_512K is not set +# CONFIG_DMA_UNCACHED_256K is not set +# CONFIG_DMA_UNCACHED_128K is not set # CONFIG_DMA_UNCACHED_NONE is not set # # Cache Support # CONFIG_BFIN_ICACHE=y -# CONFIG_BFIN_ICACHE_LOCK is not set +CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_DCACHE=y # CONFIG_BFIN_DCACHE_BANKA is not set -CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_EXTMEM_DCACHEABLE=y CONFIG_BFIN_EXTMEM_WRITEBACK=y # CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set @@ -347,7 +377,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y # CONFIG_MPU is not set # -# Asynchonous Memory Configuration +# Asynchronous Memory Configuration # # @@ -403,11 +433,6 @@ CONFIG_NET=y CONFIG_PACKET=y # CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y -CONFIG_XFRM=y -# CONFIG_XFRM_USER is not set -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -# CONFIG_XFRM_STATISTICS is not set # CONFIG_NET_KEY is not set CONFIG_INET=y # CONFIG_IP_MULTICAST is not set @@ -431,7 +456,6 @@ CONFIG_IP_PNP=y # CONFIG_INET_XFRM_MODE_BEET is not set # CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set -CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" @@ -442,6 +466,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_NETFILTER is not set # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set @@ -455,7 +480,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing @@ -508,13 +536,8 @@ CONFIG_SIR_BFIN_DMA=y # CONFIG_MCS_FIR is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set -# CONFIG_PHONET is not set -CONFIG_WIRELESS=y -# CONFIG_CFG80211 is not set -CONFIG_WIRELESS_OLD_REGULATORY=y -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -535,6 +558,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_CONNECTOR is not set CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set # CONFIG_MTD_CONCAT is not set CONFIG_MTD_PARTITIONS=y # CONFIG_MTD_REDBOOT_PARTS is not set @@ -593,6 +617,7 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y # CONFIG_MTD_DATAFLASH is not set CONFIG_MTD_M25P80=y CONFIG_M25PXX_USE_FAST_READ=y +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -608,11 +633,6 @@ CONFIG_MTD_NAND=m # CONFIG_MTD_NAND_VERIFY_WRITE is not set # CONFIG_MTD_NAND_ECC_SMC is not set # CONFIG_MTD_NAND_MUSEUM_IDS is not set 
-CONFIG_MTD_NAND_BFIN=m -CONFIG_BFIN_NAND_BASE=0x20212000 -CONFIG_BFIN_NAND_CLE=2 -CONFIG_BFIN_NAND_ALE=1 -CONFIG_BFIN_NAND_READY=3 CONFIG_MTD_NAND_IDS=m # CONFIG_MTD_NAND_BF5XX is not set # CONFIG_MTD_NAND_DISKONCHIP is not set @@ -621,6 +641,11 @@ CONFIG_MTD_NAND_IDS=m # CONFIG_MTD_ALAUDA is not set # CONFIG_MTD_ONENAND is not set +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + # # UBI - Unsorted block images # @@ -639,10 +664,20 @@ CONFIG_BLK_DEV_RAM_SIZE=4096 # CONFIG_ATA_OVER_ETH is not set # CONFIG_BLK_DEV_HD is not set CONFIG_MISC_DEVICES=y -# CONFIG_EEPROM_93CX6 is not set +# CONFIG_AD525X_DPOT is not set # CONFIG_ICS932S401 is not set # CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_ISL29003 is not set # CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -666,10 +701,6 @@ CONFIG_BLK_DEV_SR=m # CONFIG_BLK_DEV_SR_VENDOR is not set # CONFIG_CHR_DEV_SG is not set # CONFIG_CHR_DEV_SCH is not set - -# -# Some SCSI devices (e.g. CD jukebox) support multiple LUNs -# # CONFIG_SCSI_MULTI_LUN is not set # CONFIG_SCSI_CONSTANTS is not set # CONFIG_SCSI_LOGGING is not set @@ -686,6 +717,7 @@ CONFIG_SCSI_WAIT_SCAN=m # CONFIG_SCSI_SRP_ATTRS is not set # CONFIG_SCSI_LOWLEVEL is not set # CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set # CONFIG_ATA is not set # CONFIG_MD is not set CONFIG_NETDEVICES=y @@ -710,6 +742,9 @@ CONFIG_PHYLIB=y # CONFIG_BROADCOM_PHY is not set # CONFIG_ICPLUS_PHY is not set # CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set # CONFIG_FIXED_PHY is not set # CONFIG_MDIO_BITBANG is not set CONFIG_NET_ETHERNET=y @@ -720,9 +755,12 @@ CONFIG_BFIN_TX_DESC_NUM=10 CONFIG_BFIN_RX_DESC_NUM=20 CONFIG_BFIN_MAC_RMII=y # CONFIG_SMC91X is not set -# CONFIG_SMSC911X is not set # CONFIG_DM9000 is not set # CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_ADF702X is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set @@ -731,15 +769,16 @@ CONFIG_BFIN_MAC_RMII=y # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set +# CONFIG_WLAN is not set # -# Wireless LAN +# Enable WiMAX (Networking options) to see the WiMAX drivers # -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set -# CONFIG_IWLWIFI_LEDS is not set # # USB Network Adapters @@ -789,7 +828,11 @@ CONFIG_INPUT_MISC=y # CONFIG_INPUT_YEALINK is not set # CONFIG_INPUT_CM109 is not set # CONFIG_INPUT_UINPUT is not set -# CONFIG_CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_BFIN_ROTARY is not set +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_PCF8574 is not set # # Hardware I/O ports @@ -800,16 +843,13 @@ CONFIG_INPUT_MISC=y # # Character devices # -# CONFIG_AD9960 is not set CONFIG_BFIN_DMA_INTERFACE=m # CONFIG_BFIN_PPI is not set # CONFIG_BFIN_PPIFCD is not set # CONFIG_BFIN_SIMPLE_TIMER is not set # CONFIG_BFIN_SPI_ADC is not set CONFIG_BFIN_SPORT=m -# 
CONFIG_BFIN_TIMER_LATENCY is not set # CONFIG_BFIN_TWI_LCD is not set -CONFIG_SIMPLE_GPIO=m CONFIG_VT=y CONFIG_CONSOLE_TRANSLATIONS=y CONFIG_VT_CONSOLE=y @@ -827,6 +867,7 @@ CONFIG_BFIN_JTAG_COMM=m # # Non-8250 serial port support # +# CONFIG_SERIAL_MAX3100 is not set CONFIG_SERIAL_BFIN=y CONFIG_SERIAL_BFIN_CONSOLE=y CONFIG_SERIAL_BFIN_DMA=y @@ -838,14 +879,10 @@ CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_BFIN_SPORT is not set CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set CONFIG_BFIN_OTP=y # CONFIG_BFIN_OTP_WRITE_ENABLE is not set - -# -# CAN, the car bus and industrial fieldbus -# -# CONFIG_CAN4LINUX is not set # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set # CONFIG_R3964 is not set @@ -853,6 +890,7 @@ CONFIG_BFIN_OTP=y # CONFIG_TCG_TPM is not set CONFIG_I2C=y CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m CONFIG_I2C_HELPER_AUTO=y @@ -886,14 +924,6 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_EEPROM_AT24 is not set -# CONFIG_SENSORS_AD5252 is not set -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_SENSORS_PCF8574 is not set -# CONFIG_PCF8575 is not set -# CONFIG_SENSORS_PCA9539 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set @@ -910,13 +940,18 @@ CONFIG_SPI_BFIN=y # CONFIG_SPI_BFIN_LOCK is not set # CONFIG_SPI_BFIN_SPORT is not set # CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set # # SPI Protocol Masters # -# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y CONFIG_GPIOLIB=y # CONFIG_DEBUG_GPIO is not set @@ -932,6 +967,7 @@ CONFIG_GPIO_SYSFS=y # CONFIG_GPIO_MAX732X is not set # CONFIG_GPIO_PCA953X is not set # CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_ADP5588 is not set # # PCI GPIO expanders: @@ -942,11 +978,15 @@ CONFIG_GPIO_SYSFS=y # # CONFIG_GPIO_MAX7301 is not set # CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set CONFIG_WATCHDOG=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -973,28 +1013,21 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set +# CONFIG_UCB1400_CORE is not set +# CONFIG_TPS65010 is not set +# CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set # CONFIG_PMIC_DA903X is not set # CONFIG_PMIC_ADP5520 is not set # CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set # CONFIG_REGULATOR is not set - -# -# Multimedia devices -# - -# -# Multimedia core support -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_VIDEO_MEDIA is not set - -# -# Multimedia drivers -# -# CONFIG_DAB is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support @@ -1030,15 +1063,18 @@ CONFIG_FB_BFIN_T350MCQB=y # CONFIG_FB_VIRTUAL is not set # CONFIG_FB_METRONOME is not set # CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_LCD_CLASS_DEVICE=m 
+# CONFIG_LCD_LMS283GF05 is not set CONFIG_LCD_LTV350QV=m # CONFIG_LCD_ILI9320 is not set # CONFIG_LCD_TDO24M is not set # CONFIG_LCD_VGG2432A4 is not set # CONFIG_LCD_PLATFORM is not set CONFIG_BACKLIGHT_CLASS_DEVICE=m -# CONFIG_BACKLIGHT_CORGI is not set +CONFIG_BACKLIGHT_GENERIC=m +# CONFIG_BACKLIGHT_ADP8870 is not set # # Display device support @@ -1066,6 +1102,7 @@ CONFIG_SOUND=m CONFIG_SND=m CONFIG_SND_TIMER=m CONFIG_SND_PCM=m +CONFIG_SND_JACK=y # CONFIG_SND_SEQUENCER is not set # CONFIG_SND_MIXER_OSS is not set # CONFIG_SND_PCM_OSS is not set @@ -1074,6 +1111,11 @@ CONFIG_SND_SUPPORT_OLD_API=y CONFIG_SND_VERBOSE_PROCFS=y # CONFIG_SND_VERBOSE_PRINTK is not set # CONFIG_SND_DEBUG is not set +# CONFIG_SND_RAWMIDI_SEQ is not set +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set CONFIG_SND_DRIVERS=y # CONFIG_SND_DUMMY is not set # CONFIG_SND_MTPAV is not set @@ -1084,7 +1126,6 @@ CONFIG_SND_SPI=y # # ALSA Blackfin devices # -# CONFIG_SND_BLACKFIN_AD1836 is not set # CONFIG_SND_BFIN_AD73322 is not set CONFIG_SND_USB=y # CONFIG_SND_USB_AUDIO is not set @@ -1094,15 +1135,19 @@ CONFIG_SND_SOC_AC97_BUS=y CONFIG_SND_BF5XX_I2S=m CONFIG_SND_BF5XX_SOC_SSM2602=m # CONFIG_SND_BF5XX_SOC_AD73311 is not set +# CONFIG_SND_BF5XX_SOC_ADAU1371 is not set +# CONFIG_SND_BF5XX_SOC_ADAU1761 is not set +# CONFIG_SND_BF5XX_TDM is not set CONFIG_SND_BF5XX_AC97=m CONFIG_SND_BF5XX_MMAP_SUPPORT=y # CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set +# CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set +CONFIG_SND_BF5XX_SOC_AD1980=m CONFIG_SND_BF5XX_SOC_SPORT=m CONFIG_SND_BF5XX_SOC_I2S=m CONFIG_SND_BF5XX_SOC_AC97=m -CONFIG_SND_BF5XX_SOC_AD1980=m CONFIG_SND_BF5XX_SPORT_NUM=0 -# CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set +CONFIG_SND_SOC_I2C_AND_SPI=m # CONFIG_SND_SOC_ALL_CODECS is not set CONFIG_SND_SOC_AD1980=m CONFIG_SND_SOC_SSM2602=m @@ -1110,7 +1155,6 @@ CONFIG_SND_SOC_SSM2602=m CONFIG_AC97_BUS=m CONFIG_HID_SUPPORT=y CONFIG_HID=y -# CONFIG_HID_DEBUG is not set # CONFIG_HIDRAW is not set # @@ -1123,30 +1167,35 @@ CONFIG_USB_HID=y # # Special HID drivers # -CONFIG_HID_COMPAT=y CONFIG_HID_A4TECH=y CONFIG_HID_APPLE=y CONFIG_HID_BELKIN=y -CONFIG_HID_BRIGHT=y CONFIG_HID_CHERRY=y CONFIG_HID_CHICONY=y CONFIG_HID_CYPRESS=y -CONFIG_HID_DELL=y +# CONFIG_HID_DRAGONRISE is not set CONFIG_HID_EZKEY=y +# CONFIG_HID_KYE is not set CONFIG_HID_GYRATION=y +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set CONFIG_HID_LOGITECH=y # CONFIG_LOGITECH_FF is not set # CONFIG_LOGIRUMBLEPAD2_FF is not set CONFIG_HID_MICROSOFT=y CONFIG_HID_MONTEREY=y +# CONFIG_HID_NTRIG is not set CONFIG_HID_PANTHERLORD=y # CONFIG_PANTHERLORD_FF is not set CONFIG_HID_PETALYNX=y CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y -CONFIG_THRUSTMASTER_FF=m -CONFIG_ZEROPLUS_FF=m +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_ZEROPLUS is not set CONFIG_USB_SUPPORT=y CONFIG_USB_ARCH_HAS_HCD=y # CONFIG_USB_ARCH_HAS_OHCI is not set @@ -1172,6 +1221,7 @@ CONFIG_USB_MON=y # USB Host Controller Drivers # # CONFIG_USB_C67X00_HCD is not set +# CONFIG_USB_OXU210HP_HCD is not set # CONFIG_USB_ISP116X_HCD is not set # CONFIG_USB_ISP1760_HCD is not set # CONFIG_USB_ISP1362_HCD is not set @@ -1188,9 +1238,7 @@ CONFIG_USB_MUSB_HOST=y # CONFIG_USB_MUSB_PERIPHERAL is not set # CONFIG_USB_MUSB_OTG is not set CONFIG_USB_MUSB_HDRC_HCD=y -# CONFIG_MUSB_PIO_ONLY is 
not set -CONFIG_USB_INVENTRA_DMA=y -# CONFIG_USB_TI_CPPI_DMA is not set +CONFIG_MUSB_PIO_ONLY=y # CONFIG_USB_MUSB_DEBUG is not set # @@ -1202,18 +1250,17 @@ CONFIG_USB_INVENTRA_DMA=y # CONFIG_USB_TMC is not set # -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may # # -# see USB_STORAGE Help for more information +# also be needed; see USB_STORAGE Help for more info # CONFIG_USB_STORAGE=y # CONFIG_USB_STORAGE_DEBUG is not set # CONFIG_USB_STORAGE_DATAFAB is not set # CONFIG_USB_STORAGE_FREECOM is not set # CONFIG_USB_STORAGE_ISD200 is not set -# CONFIG_USB_STORAGE_DPCM is not set # CONFIG_USB_STORAGE_USBAT is not set # CONFIG_USB_STORAGE_SDDR09 is not set # CONFIG_USB_STORAGE_SDDR55 is not set @@ -1249,7 +1296,6 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_LED is not set # CONFIG_USB_CYPRESS_CY7C63 is not set # CONFIG_USB_CYTHERM is not set -# CONFIG_USB_PHIDGET is not set # CONFIG_USB_IDMOUSE is not set # CONFIG_USB_FTDI_ELAN is not set # CONFIG_USB_APPLEDISPLAY is not set @@ -1261,6 +1307,13 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_ISIGHTFW is not set # CONFIG_USB_VST is not set # CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# +CONFIG_USB_OTG_UTILS=y +# CONFIG_USB_GPIO_VBUS is not set +CONFIG_NOP_USB_XCEIV=y # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set # CONFIG_NEW_LEDS is not set @@ -1296,6 +1349,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_S35390A is not set # CONFIG_RTC_DRV_FM3130 is not set # CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set # # SPI RTC drivers @@ -1307,6 +1361,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_R9701 is not set # CONFIG_RTC_DRV_RS5C348 is not set # CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set # # Platform RTC drivers @@ -1327,9 +1382,20 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_BFIN=y # CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set + +# +# TI VLYNQ +# # CONFIG_STAGING is not set +# +# Firmware Drivers +# +# CONFIG_FIRMWARE_MEMMAP is not set +# CONFIG_SIGMA is not set + # # File systems # @@ -1340,9 +1406,13 @@ CONFIG_EXT2_FS=m # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set # CONFIG_FS_POSIX_ACL is not set -CONFIG_FILE_LOCKING=y # CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y # CONFIG_DNOTIFY is not set CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y @@ -1351,6 +1421,11 @@ CONFIG_INOTIFY_USER=y # CONFIG_AUTOFS4_FS is not set # CONFIG_FUSE_FS is not set +# +# Caches +# +# CONFIG_FSCACHE is not set + # # CD-ROM/DVD Filesystems # @@ -1376,13 +1451,9 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" CONFIG_PROC_FS=y CONFIG_PROC_SYSCTL=y CONFIG_SYSFS=y -# CONFIG_TMPFS is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set @@ -1401,17 +1472,8 @@ CONFIG_JFFS2_ZLIB=y # CONFIG_JFFS2_LZO is not set CONFIG_JFFS2_RTIME=y # CONFIG_JFFS2_RUBIN is not set -CONFIG_YAFFS_FS=m -CONFIG_YAFFS_YAFFS1=y -# CONFIG_YAFFS_9BYTE_TAGS is not set -# CONFIG_YAFFS_DOES_ECC is not set -CONFIG_YAFFS_YAFFS2=y -CONFIG_YAFFS_AUTO_YAFFS2=y -# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set -# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set -# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set -CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y # 
CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_OMFS_FS is not set @@ -1430,7 +1492,6 @@ CONFIG_LOCKD=m CONFIG_LOCKD_V4=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=m -# CONFIG_SUNRPC_REGISTER_V4 is not set # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set CONFIG_SMB_FS=m @@ -1445,7 +1506,7 @@ CONFIG_SMB_FS=m # # CONFIG_PARTITION_ADVANCED is not set CONFIG_MSDOS_PARTITION=y -CONFIG_NLS=m +CONFIG_NLS=y CONFIG_NLS_DEFAULT="iso8859-1" CONFIG_NLS_CODEPAGE_437=m # CONFIG_NLS_CODEPAGE_737 is not set @@ -1495,14 +1556,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 # CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set @@ -1510,31 +1576,39 @@ CONFIG_SCHED_DEBUG=y # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SPINLOCK is not set # CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set CONFIG_DEBUG_BUGVERBOSE=y CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_NOMMU_REGIONS is not set # CONFIG_DEBUG_WRITECOUNT is not set # CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set # CONFIG_FRAME_POINTER is not set # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_BACKTRACE_SELF_TEST is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_FAULT_INJECTION is not set - -# -# Tracers -# -# CONFIG_SCHED_TRACER is not set -# CONFIG_CONTEXT_SWITCH_TRACER is not set -# CONFIG_BOOT_TRACER is not set -# CONFIG_DYNAMIC_PRINTK_DEBUG is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_BRANCH_PROFILE_NONE is not set +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set @@ -1559,6 +1633,7 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y CONFIG_EARLY_PRINTK=y CONFIG_CPLB_INFO=y CONFIG_ACCESS_CHECK=y +# CONFIG_BFIN_ISRAM_SELF_TEST is not set # # Security options @@ -1567,15 +1642,15 @@ CONFIG_ACCESS_CHECK=y CONFIG_SECURITY=y # CONFIG_SECURITYFS is not set # CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set # CONFIG_SECURITY_FILE_CAPABILITIES is not set # CONFIG_SECURITY_ROOTPLUG is not set -CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 +# CONFIG_SECURITY_TOMOYO is not set CONFIG_CRYPTO=y # # Crypto core or helper # -# CONFIG_CRYPTO_FIPS is not set # CONFIG_CRYPTO_MANAGER is not set # CONFIG_CRYPTO_MANAGER2 is not set # CONFIG_CRYPTO_GF128MUL is not set @@ -1607,11 +1682,13 @@ 
CONFIG_CRYPTO=y # # CONFIG_CRYPTO_HMAC is not set # CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set # # Digest # # CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set # CONFIG_CRYPTO_MD4 is not set # CONFIG_CRYPTO_MD5 is not set # CONFIG_CRYPTO_MICHAEL_MIC is not set @@ -1648,6 +1725,7 @@ CONFIG_CRYPTO=y # Compression # # CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set # CONFIG_CRYPTO_LZO is not set # @@ -1655,11 +1733,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set # # Library routines # CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m # CONFIG_CRC16 is not set # CONFIG_CRC_T10DIF is not set @@ -1669,6 +1749,8 @@ CONFIG_CRC32=y # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=m +CONFIG_DECOMPRESS_GZIP=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig index 50f9a23ccdbd..0b13d5836a48 100644 --- a/arch/blackfin/configs/BF533-EZKIT_defconfig +++ b/arch/blackfin/configs/BF533-EZKIT_defconfig @@ -1,22 +1,27 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.28.10 -# Thu May 21 05:50:01 2009 +# Linux kernel version: 2.6.32.2 # # CONFIG_MMU is not set # CONFIG_FPU is not set CONFIG_RWSEM_GENERIC_SPINLOCK=y # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set CONFIG_BLACKFIN=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_BUG=y CONFIG_ZONE_DMA=y CONFIG_GENERIC_FIND_NEXT_BIT=y -CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_GENERIC_GPIO=y CONFIG_FORCE_MAX_ZONEORDER=14 CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -26,22 +31,41 @@ CONFIG_BROKEN_ON_SMP=y CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y # CONFIG_POSIX_MQUEUE is not set # CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_TASKSTATS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set # CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set # CONFIG_SYSFS_DEPRECATED_V2 is not set # CONFIG_RELAY is not set # CONFIG_NAMESPACES is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_ANON_INODES=y @@ -62,6 +86,10 @@ CONFIG_EPOLL=y # CONFIG_TIMERFD is not set # CONFIG_EVENTFD is not set # CONFIG_AIO is not set + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y CONFIG_COMPAT_BRK=y CONFIG_SLAB=y @@ -69,11 +97,15 @@ CONFIG_SLAB=y # CONFIG_SLOB is not set CONFIG_MMAP_ALLOW_UNINITIALIZED=y # CONFIG_PROFILING is not set -# CONFIG_MARKERS is not set CONFIG_HAVE_OPROFILE=y + +# +# GCOV-based kernel profiling +# +# 
CONFIG_GCOV_KERNEL is not set +# CONFIG_SLOW_WORK is not set # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y -CONFIG_TINY_SHMEM=y CONFIG_BASE_SMALL=0 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -81,11 +113,8 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set +# CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_INTEGRITY is not set @@ -101,7 +130,6 @@ CONFIG_DEFAULT_AS=y # CONFIG_DEFAULT_CFQ is not set # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="anticipatory" -CONFIG_CLASSIC_RCU=y # CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set @@ -132,15 +160,15 @@ CONFIG_BF533=y # CONFIG_BF537 is not set # CONFIG_BF538 is not set # CONFIG_BF539 is not set -# CONFIG_BF542 is not set +# CONFIG_BF542_std is not set # CONFIG_BF542M is not set -# CONFIG_BF544 is not set +# CONFIG_BF544_std is not set # CONFIG_BF544M is not set -# CONFIG_BF547 is not set +# CONFIG_BF547_std is not set # CONFIG_BF547M is not set -# CONFIG_BF548 is not set +# CONFIG_BF548_std is not set # CONFIG_BF548M is not set -# CONFIG_BF549 is not set +# CONFIG_BF549_std is not set # CONFIG_BF549M is not set # CONFIG_BF561 is not set CONFIG_BF_REV_MIN=3 @@ -228,7 +256,7 @@ CONFIG_GENERIC_TIME=y CONFIG_GENERIC_CLOCKEVENTS=y # CONFIG_TICKSOURCE_GPTMR0 is not set CONFIG_TICKSOURCE_CORETMR=y -# CONFIG_CYCLES_CLOCKSOURCE is not set +CONFIG_CYCLES_CLOCKSOURCE=y # CONFIG_GPTMR0_CLOCKSOURCE is not set CONFIG_TICK_ONESHOT=y # CONFIG_NO_HZ is not set @@ -280,7 +308,6 @@ CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_RESOURCES_64BIT is not set # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=1 CONFIG_VIRT_TO_BUS=y @@ -289,19 +316,18 @@ CONFIG_BFIN_GPTIMERS=m # CONFIG_DMA_UNCACHED_4M is not set # CONFIG_DMA_UNCACHED_2M is not set CONFIG_DMA_UNCACHED_1M=y +# CONFIG_DMA_UNCACHED_512K is not set +# CONFIG_DMA_UNCACHED_256K is not set +# CONFIG_DMA_UNCACHED_128K is not set # CONFIG_DMA_UNCACHED_NONE is not set -# -# Cache Support -# # # Cache Support # CONFIG_BFIN_ICACHE=y -# CONFIG_BFIN_ICACHE_LOCK is not set +CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_DCACHE=y # CONFIG_BFIN_DCACHE_BANKA is not set -CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_EXTMEM_DCACHEABLE=y CONFIG_BFIN_EXTMEM_WRITEBACK=y # CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set @@ -312,7 +338,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y # CONFIG_MPU is not set # -# Asynchonous Memory Configuration +# Asynchronous Memory Configuration # # @@ -358,6 +384,7 @@ CONFIG_PM=y CONFIG_PM_SLEEP=y CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y +# CONFIG_PM_RUNTIME is not set CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_PM_BFIN_SLEEP_DEEPER=y # CONFIG_PM_BFIN_SLEEP is not set @@ -379,11 +406,6 @@ CONFIG_NET=y CONFIG_PACKET=y # CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y -CONFIG_XFRM=y -# CONFIG_XFRM_USER is not set -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -# CONFIG_XFRM_STATISTICS is not set # CONFIG_NET_KEY is not set CONFIG_INET=y # CONFIG_IP_MULTICAST is not set @@ -407,7 +429,6 @@ CONFIG_IP_PNP=y # CONFIG_INET_XFRM_MODE_BEET is not set # CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set -CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" @@ 
-418,6 +439,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_NETFILTER is not set # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set @@ -431,7 +453,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing @@ -475,13 +500,8 @@ CONFIG_IRTTY_SIR=m # # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set -# CONFIG_PHONET is not set -CONFIG_WIRELESS=y -# CONFIG_CFG80211 is not set -CONFIG_WIRELESS_OLD_REGULATORY=y -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -502,6 +522,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_CONNECTOR is not set CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set # CONFIG_MTD_CONCAT is not set CONFIG_MTD_PARTITIONS=y # CONFIG_MTD_REDBOOT_PARTS is not set @@ -559,6 +580,7 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y # # CONFIG_MTD_DATAFLASH is not set # CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -573,6 +595,11 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y # CONFIG_MTD_NAND is not set # CONFIG_MTD_ONENAND is not set +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + # # UBI - Unsorted block images # @@ -590,9 +617,14 @@ CONFIG_BLK_DEV_RAM_SIZE=4096 # CONFIG_ATA_OVER_ETH is not set # CONFIG_BLK_DEV_HD is not set CONFIG_MISC_DEVICES=y -# CONFIG_EEPROM_93CX6 is not set # CONFIG_ENCLOSURE_SERVICES is not set # CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_93CX6 is not set CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -616,9 +648,12 @@ CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_MII=y CONFIG_SMC91X=y -# CONFIG_SMSC911X is not set # CONFIG_DM9000 is not set # CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_ADF702X is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set @@ -627,15 +662,16 @@ CONFIG_SMC91X=y # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set +# CONFIG_WLAN is not set # -# Wireless LAN +# Enable WiMAX (Networking options) to see the WiMAX drivers # -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set -# CONFIG_IWLWIFI_LEDS is not set # CONFIG_WAN is not set # CONFIG_PPP is not set # CONFIG_SLIP is not set @@ -679,15 +715,12 @@ CONFIG_INPUT_EVDEV=m # # Character devices # -# CONFIG_AD9960 is not set CONFIG_BFIN_DMA_INTERFACE=m # CONFIG_BFIN_PPI is not set # CONFIG_BFIN_PPIFCD is not set # CONFIG_BFIN_SIMPLE_TIMER is not set # CONFIG_BFIN_SPI_ADC is not set CONFIG_BFIN_SPORT=y -# CONFIG_BFIN_TIMER_LATENCY is not set -CONFIG_SIMPLE_GPIO=m # CONFIG_VT is not set # CONFIG_DEVKMEM is not set CONFIG_BFIN_JTAG_COMM=m @@ -701,6 +734,7 @@ CONFIG_BFIN_JTAG_COMM=m # # Non-8250 serial port support # +# CONFIG_SERIAL_MAX3100 is not set CONFIG_SERIAL_BFIN=y 
CONFIG_SERIAL_BFIN_CONSOLE=y CONFIG_SERIAL_BFIN_DMA=y @@ -711,12 +745,8 @@ CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_BFIN_SPORT is not set CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set - -# -# CAN, the car bus and industrial fieldbus -# -# CONFIG_CAN4LINUX is not set # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set # CONFIG_R3964 is not set @@ -734,13 +764,18 @@ CONFIG_SPI_BFIN=y # CONFIG_SPI_BFIN_LOCK is not set # CONFIG_SPI_BFIN_SPORT is not set # CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set # # SPI Protocol Masters # -# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y CONFIG_GPIOLIB=y # CONFIG_DEBUG_GPIO is not set @@ -753,9 +788,6 @@ CONFIG_GPIO_SYSFS=y # # I2C GPIO expanders: # -# CONFIG_GPIO_MAX732X is not set -# CONFIG_GPIO_PCA953X is not set -# CONFIG_GPIO_PCF857X is not set # # PCI GPIO expanders: @@ -766,11 +798,15 @@ CONFIG_GPIO_SYSFS=y # # CONFIG_GPIO_MAX7301 is not set # CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set CONFIG_WATCHDOG=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -793,23 +829,10 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set # CONFIG_MFD_TMIO is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_EZX_PCAP is not set # CONFIG_REGULATOR is not set - -# -# Multimedia devices -# - -# -# Multimedia core support -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_VIDEO_MEDIA is not set - -# -# Multimedia drivers -# -# CONFIG_DAB is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support @@ -826,14 +849,12 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_SOUND is not set CONFIG_HID_SUPPORT=y CONFIG_HID=m -# CONFIG_HID_DEBUG is not set # CONFIG_HIDRAW is not set # CONFIG_HID_PID is not set # # Special HID drivers # -CONFIG_HID_COMPAT=y # CONFIG_USB_SUPPORT is not set # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set @@ -864,6 +885,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_R9701 is not set # CONFIG_RTC_DRV_RS5C348 is not set # CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set # # Platform RTC drivers @@ -884,9 +906,19 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_BFIN=y # CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set + +# +# TI VLYNQ +# # CONFIG_STAGING is not set +# +# Firmware Drivers +# +# CONFIG_FIRMWARE_MEMMAP is not set + # # File systems # @@ -896,9 +928,13 @@ CONFIG_RTC_DRV_BFIN=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set # CONFIG_FS_POSIX_ACL is not set -CONFIG_FILE_LOCKING=y # CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y # CONFIG_DNOTIFY is not set CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y @@ -907,6 +943,11 @@ CONFIG_INOTIFY_USER=y # CONFIG_AUTOFS4_FS is not set # CONFIG_FUSE_FS is not set +# +# Caches +# +# CONFIG_FSCACHE is not set + # # CD-ROM/DVD Filesystems # @@ -926,13 +967,9 @@ CONFIG_INOTIFY_USER=y CONFIG_PROC_FS=y CONFIG_PROC_SYSCTL=y CONFIG_SYSFS=y -# CONFIG_TMPFS is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set - -# -# Miscellaneous 
filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set @@ -951,17 +988,8 @@ CONFIG_JFFS2_ZLIB=y # CONFIG_JFFS2_LZO is not set CONFIG_JFFS2_RTIME=y # CONFIG_JFFS2_RUBIN is not set -CONFIG_YAFFS_FS=m -CONFIG_YAFFS_YAFFS1=y -# CONFIG_YAFFS_9BYTE_TAGS is not set -# CONFIG_YAFFS_DOES_ECC is not set -CONFIG_YAFFS_YAFFS2=y -CONFIG_YAFFS_AUTO_YAFFS2=y -# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set -# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set -# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set -CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_OMFS_FS is not set @@ -980,7 +1008,6 @@ CONFIG_LOCKD=m CONFIG_LOCKD_V4=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=m -# CONFIG_SUNRPC_REGISTER_V4 is not set # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set CONFIG_SMB_FS=m @@ -1045,14 +1072,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 # CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set @@ -1060,31 +1092,39 @@ CONFIG_SCHED_DEBUG=y # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SPINLOCK is not set # CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set CONFIG_DEBUG_BUGVERBOSE=y CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_NOMMU_REGIONS is not set # CONFIG_DEBUG_WRITECOUNT is not set # CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set # CONFIG_FRAME_POINTER is not set # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_BACKTRACE_SELF_TEST is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_FAULT_INJECTION is not set - -# -# Tracers -# -# CONFIG_SCHED_TRACER is not set -# CONFIG_CONTEXT_SWITCH_TRACER is not set -# CONFIG_BOOT_TRACER is not set -# CONFIG_DYNAMIC_PRINTK_DEBUG is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_BRANCH_PROFILE_NONE is not set +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set @@ -1109,6 +1149,7 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y CONFIG_EARLY_PRINTK=y CONFIG_CPLB_INFO=y CONFIG_ACCESS_CHECK=y +# CONFIG_BFIN_ISRAM_SELF_TEST is not set # # Security options @@ -1117,14 +1158,14 @@ CONFIG_ACCESS_CHECK=y CONFIG_SECURITY=y # CONFIG_SECURITYFS is not set # CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set # 
CONFIG_SECURITY_FILE_CAPABILITIES is not set -CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 +# CONFIG_SECURITY_TOMOYO is not set CONFIG_CRYPTO=y # # Crypto core or helper # -# CONFIG_CRYPTO_FIPS is not set # CONFIG_CRYPTO_MANAGER is not set # CONFIG_CRYPTO_MANAGER2 is not set # CONFIG_CRYPTO_GF128MUL is not set @@ -1156,11 +1197,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_HMAC is not set # CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set # # Digest # # CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set # CONFIG_CRYPTO_MD4 is not set # CONFIG_CRYPTO_MD5 is not set # CONFIG_CRYPTO_MICHAEL_MIC is not set @@ -1197,6 +1240,7 @@ CONFIG_CRYPTO=y # Compression # # CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set # CONFIG_CRYPTO_LZO is not set # @@ -1204,11 +1248,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set # # Library routines # CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m # CONFIG_CRC16 is not set # CONFIG_CRC_T10DIF is not set @@ -1218,6 +1264,8 @@ CONFIG_CRC32=y # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=m +CONFIG_DECOMPRESS_GZIP=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig index 6c60c8286318..c3fe6e5b612f 100644 --- a/arch/blackfin/configs/BF533-STAMP_defconfig +++ b/arch/blackfin/configs/BF533-STAMP_defconfig @@ -1,22 +1,27 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.28.10 -# Thu May 21 05:50:01 2009 +# Linux kernel version: 2.6.32.2 # # CONFIG_MMU is not set # CONFIG_FPU is not set CONFIG_RWSEM_GENERIC_SPINLOCK=y # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set CONFIG_BLACKFIN=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_BUG=y CONFIG_ZONE_DMA=y CONFIG_GENERIC_FIND_NEXT_BIT=y -CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_GENERIC_GPIO=y CONFIG_FORCE_MAX_ZONEORDER=14 CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -26,22 +31,41 @@ CONFIG_BROKEN_ON_SMP=y CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y # CONFIG_POSIX_MQUEUE is not set # CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_TASKSTATS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set # CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set # CONFIG_SYSFS_DEPRECATED_V2 is not set # CONFIG_RELAY is not set # CONFIG_NAMESPACES is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_ANON_INODES=y @@ -62,6 +86,10 @@ CONFIG_EPOLL=y # CONFIG_TIMERFD is not set # CONFIG_EVENTFD is not set # CONFIG_AIO is not 
set + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y CONFIG_COMPAT_BRK=y CONFIG_SLAB=y @@ -69,11 +97,15 @@ CONFIG_SLAB=y # CONFIG_SLOB is not set CONFIG_MMAP_ALLOW_UNINITIALIZED=y # CONFIG_PROFILING is not set -# CONFIG_MARKERS is not set CONFIG_HAVE_OPROFILE=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_SLOW_WORK is not set # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y -CONFIG_TINY_SHMEM=y CONFIG_BASE_SMALL=0 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -81,11 +113,8 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set +# CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_INTEGRITY is not set @@ -101,7 +130,6 @@ CONFIG_DEFAULT_AS=y # CONFIG_DEFAULT_CFQ is not set # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="anticipatory" -CONFIG_CLASSIC_RCU=y # CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set @@ -132,15 +160,15 @@ CONFIG_BF533=y # CONFIG_BF537 is not set # CONFIG_BF538 is not set # CONFIG_BF539 is not set -# CONFIG_BF542 is not set +# CONFIG_BF542_std is not set # CONFIG_BF542M is not set -# CONFIG_BF544 is not set +# CONFIG_BF544_std is not set # CONFIG_BF544M is not set -# CONFIG_BF547 is not set +# CONFIG_BF547_std is not set # CONFIG_BF547M is not set -# CONFIG_BF548 is not set +# CONFIG_BF548_std is not set # CONFIG_BF548M is not set -# CONFIG_BF549 is not set +# CONFIG_BF549_std is not set # CONFIG_BF549M is not set # CONFIG_BF561 is not set CONFIG_BF_REV_MIN=3 @@ -228,7 +256,7 @@ CONFIG_GENERIC_TIME=y CONFIG_GENERIC_CLOCKEVENTS=y # CONFIG_TICKSOURCE_GPTMR0 is not set CONFIG_TICKSOURCE_CORETMR=y -# CONFIG_CYCLES_CLOCKSOURCE is not set +CONFIG_CYCLES_CLOCKSOURCE=y # CONFIG_GPTMR0_CLOCKSOURCE is not set CONFIG_TICK_ONESHOT=y # CONFIG_NO_HZ is not set @@ -280,7 +308,6 @@ CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_RESOURCES_64BIT is not set # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=1 CONFIG_VIRT_TO_BUS=y @@ -289,16 +316,18 @@ CONFIG_BFIN_GPTIMERS=m # CONFIG_DMA_UNCACHED_4M is not set # CONFIG_DMA_UNCACHED_2M is not set CONFIG_DMA_UNCACHED_1M=y +# CONFIG_DMA_UNCACHED_512K is not set +# CONFIG_DMA_UNCACHED_256K is not set +# CONFIG_DMA_UNCACHED_128K is not set # CONFIG_DMA_UNCACHED_NONE is not set # # Cache Support # CONFIG_BFIN_ICACHE=y -# CONFIG_BFIN_ICACHE_LOCK is not set +CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_DCACHE=y # CONFIG_BFIN_DCACHE_BANKA is not set -CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_EXTMEM_DCACHEABLE=y CONFIG_BFIN_EXTMEM_WRITEBACK=y # CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set @@ -309,7 +338,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y # CONFIG_MPU is not set # -# Asynchonous Memory Configuration +# Asynchronous Memory Configuration # # @@ -355,6 +384,7 @@ CONFIG_PM=y CONFIG_PM_SLEEP=y CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y +# CONFIG_PM_RUNTIME is not set CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_PM_BFIN_SLEEP_DEEPER=y # CONFIG_PM_BFIN_SLEEP is not set @@ -376,11 +406,6 @@ CONFIG_NET=y CONFIG_PACKET=y # CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y -CONFIG_XFRM=y -# CONFIG_XFRM_USER is not set -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -# CONFIG_XFRM_STATISTICS is not set # CONFIG_NET_KEY is not set 
CONFIG_INET=y # CONFIG_IP_MULTICAST is not set @@ -404,7 +429,6 @@ CONFIG_IP_PNP=y # CONFIG_INET_XFRM_MODE_BEET is not set # CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set -CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" @@ -415,6 +439,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_NETFILTER is not set # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set @@ -428,7 +453,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing @@ -474,13 +502,8 @@ CONFIG_SIR_BFIN_DMA=y # # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set -# CONFIG_PHONET is not set -CONFIG_WIRELESS=y -# CONFIG_CFG80211 is not set -CONFIG_WIRELESS_OLD_REGULATORY=y -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -501,6 +524,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_CONNECTOR is not set CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set # CONFIG_MTD_CONCAT is not set CONFIG_MTD_PARTITIONS=y # CONFIG_MTD_REDBOOT_PARTS is not set @@ -560,6 +584,7 @@ CONFIG_MTD_BFIN_ASYNC=m # # CONFIG_MTD_DATAFLASH is not set # CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -574,6 +599,11 @@ CONFIG_MTD_BFIN_ASYNC=m # CONFIG_MTD_NAND is not set # CONFIG_MTD_ONENAND is not set +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + # # UBI - Unsorted block images # @@ -591,10 +621,20 @@ CONFIG_BLK_DEV_RAM_SIZE=4096 # CONFIG_ATA_OVER_ETH is not set # CONFIG_BLK_DEV_HD is not set CONFIG_MISC_DEVICES=y -# CONFIG_EEPROM_93CX6 is not set +# CONFIG_AD525X_DPOT is not set # CONFIG_ICS932S401 is not set # CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_ISL29003 is not set # CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -618,9 +658,12 @@ CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_MII=y CONFIG_SMC91X=y -# CONFIG_SMSC911X is not set # CONFIG_DM9000 is not set # CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_ADF702X is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set @@ -629,15 +672,16 @@ CONFIG_SMC91X=y # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set +# CONFIG_WLAN is not set # -# Wireless LAN +# Enable WiMAX (Networking options) to see the WiMAX drivers # -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set -# CONFIG_IWLWIFI_LEDS is not set # CONFIG_WAN is not set # CONFIG_PPP is not set # CONFIG_SLIP is not set @@ -672,7 +716,10 @@ CONFIG_INPUT_EVDEV=m # CONFIG_INPUT_TOUCHSCREEN 
is not set CONFIG_INPUT_MISC=y # CONFIG_INPUT_UINPUT is not set -CONFIG_CONFIG_INPUT_PCF8574=m +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_PCF8574 is not set # # Hardware I/O ports @@ -683,16 +730,13 @@ CONFIG_CONFIG_INPUT_PCF8574=m # # Character devices # -# CONFIG_AD9960 is not set CONFIG_BFIN_DMA_INTERFACE=m # CONFIG_BFIN_PPI is not set # CONFIG_BFIN_PPIFCD is not set # CONFIG_BFIN_SIMPLE_TIMER is not set # CONFIG_BFIN_SPI_ADC is not set CONFIG_BFIN_SPORT=m -# CONFIG_BFIN_TIMER_LATENCY is not set # CONFIG_BFIN_TWI_LCD is not set -CONFIG_SIMPLE_GPIO=m # CONFIG_VT is not set # CONFIG_DEVKMEM is not set CONFIG_BFIN_JTAG_COMM=m @@ -706,6 +750,7 @@ CONFIG_BFIN_JTAG_COMM=m # # Non-8250 serial port support # +# CONFIG_SERIAL_MAX3100 is not set CONFIG_SERIAL_BFIN=y CONFIG_SERIAL_BFIN_CONSOLE=y CONFIG_SERIAL_BFIN_DMA=y @@ -716,12 +761,8 @@ CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_BFIN_SPORT is not set CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set - -# -# CAN, the car bus and industrial fieldbus -# -# CONFIG_CAN4LINUX is not set # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set # CONFIG_R3964 is not set @@ -729,6 +770,7 @@ CONFIG_UNIX98_PTYS=y # CONFIG_TCG_TPM is not set CONFIG_I2C=m CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m CONFIG_I2C_HELPER_AUTO=y @@ -759,14 +801,6 @@ CONFIG_I2C_HELPER_AUTO=y # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_EEPROM_AT24 is not set -# CONFIG_SENSORS_AD5252 is not set -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_SENSORS_PCF8574 is not set -# CONFIG_PCF8575 is not set -# CONFIG_SENSORS_PCA9539 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set @@ -783,13 +817,18 @@ CONFIG_SPI_BFIN=y # CONFIG_SPI_BFIN_LOCK is not set # CONFIG_SPI_BFIN_SPORT is not set # CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set # # SPI Protocol Masters # -# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y CONFIG_GPIOLIB=y # CONFIG_DEBUG_GPIO is not set @@ -805,6 +844,7 @@ CONFIG_GPIO_SYSFS=y # CONFIG_GPIO_MAX732X is not set # CONFIG_GPIO_PCA953X is not set # CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_ADP5588 is not set # # PCI GPIO expanders: @@ -815,11 +855,15 @@ CONFIG_GPIO_SYSFS=y # # CONFIG_GPIO_MAX7301 is not set # CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set CONFIG_WATCHDOG=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -841,26 +885,18 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set +# CONFIG_UCB1400_CORE is not set +# CONFIG_TPS65010 is not set # CONFIG_MFD_TMIO is not set # CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set # CONFIG_REGULATOR is not set - -# -# Multimedia devices -# - -# -# Multimedia core support -# -# CONFIG_VIDEO_DEV is not set -# 
CONFIG_DVB_CORE is not set -# CONFIG_VIDEO_MEDIA is not set - -# -# Multimedia drivers -# -# CONFIG_DAB is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support @@ -904,6 +940,7 @@ CONFIG_ADV7393_1XMEM=y # CONFIG_FB_VIRTUAL is not set # CONFIG_FB_METRONOME is not set # CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set # CONFIG_BACKLIGHT_LCD_SUPPORT is not set # @@ -913,19 +950,27 @@ CONFIG_ADV7393_1XMEM=y # CONFIG_LOGO is not set CONFIG_SOUND=m CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y CONFIG_SND=m CONFIG_SND_TIMER=m CONFIG_SND_PCM=m +CONFIG_SND_JACK=y # CONFIG_SND_SEQUENCER is not set CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m CONFIG_SND_PCM_OSS_PLUGINS=y +# CONFIG_SND_HRTIMER is not set # CONFIG_SND_DYNAMIC_MINORS is not set CONFIG_SND_SUPPORT_OLD_API=y CONFIG_SND_VERBOSE_PROCFS=y # CONFIG_SND_VERBOSE_PRINTK is not set # CONFIG_SND_DEBUG is not set +# CONFIG_SND_RAWMIDI_SEQ is not set +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set CONFIG_SND_DRIVERS=y # CONFIG_SND_DUMMY is not set # CONFIG_SND_MTPAV is not set @@ -936,13 +981,6 @@ CONFIG_SND_SPI=y # # ALSA Blackfin devices # -CONFIG_SND_BLACKFIN_AD1836=m -CONFIG_SND_BLACKFIN_AD1836_TDM=y -# CONFIG_SND_BLACKFIN_AD1836_I2S is not set -CONFIG_SND_BLACKFIN_AD1836_MULSUB=y -# CONFIG_SND_BLACKFIN_AD1836_5P1 is not set -CONFIG_SND_BLACKFIN_SPORT=0 -CONFIG_SND_BLACKFIN_SPI_PFBIT=4 CONFIG_SND_BFIN_SPORT=0 CONFIG_SND_BFIN_AD73322=m CONFIG_SND_BFIN_AD73322_SPORT0_SE=10 @@ -953,16 +991,20 @@ CONFIG_SND_SOC_AC97_BUS=y CONFIG_SND_BF5XX_I2S=m # CONFIG_SND_BF5XX_SOC_SSM2602 is not set CONFIG_SND_BF5XX_SOC_AD73311=m +# CONFIG_SND_BF5XX_SOC_ADAU1371 is not set +# CONFIG_SND_BF5XX_SOC_ADAU1761 is not set CONFIG_SND_BFIN_AD73311_SE=4 +# CONFIG_SND_BF5XX_TDM is not set CONFIG_SND_BF5XX_AC97=m CONFIG_SND_BF5XX_MMAP_SUPPORT=y # CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set +# CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set +CONFIG_SND_BF5XX_SOC_AD1980=m CONFIG_SND_BF5XX_SOC_SPORT=m CONFIG_SND_BF5XX_SOC_I2S=m CONFIG_SND_BF5XX_SOC_AC97=m -CONFIG_SND_BF5XX_SOC_AD1980=m CONFIG_SND_BF5XX_SPORT_NUM=0 -# CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set +CONFIG_SND_SOC_I2C_AND_SPI=m # CONFIG_SND_SOC_ALL_CODECS is not set CONFIG_SND_SOC_AD1980=m CONFIG_SND_SOC_AD73311=m @@ -970,14 +1012,12 @@ CONFIG_SND_SOC_AD73311=m CONFIG_AC97_BUS=m CONFIG_HID_SUPPORT=y CONFIG_HID=y -# CONFIG_HID_DEBUG is not set # CONFIG_HIDRAW is not set # CONFIG_HID_PID is not set # # Special HID drivers # -CONFIG_HID_COMPAT=y # CONFIG_USB_SUPPORT is not set # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set @@ -1014,6 +1054,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_S35390A is not set # CONFIG_RTC_DRV_FM3130 is not set # CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set # # SPI RTC drivers @@ -1025,6 +1066,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_R9701 is not set # CONFIG_RTC_DRV_RS5C348 is not set # CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set # # Platform RTC drivers @@ -1045,9 +1087,20 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_BFIN=y # CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set + +# +# TI VLYNQ +# # CONFIG_STAGING is not set +# +# Firmware Drivers +# +# CONFIG_FIRMWARE_MEMMAP is not set +# CONFIG_SIGMA is not set + # # File systems # @@ -1057,9 +1110,13 @@ CONFIG_RTC_DRV_BFIN=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set # 
CONFIG_FS_POSIX_ACL is not set -CONFIG_FILE_LOCKING=y # CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y # CONFIG_DNOTIFY is not set CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y @@ -1068,6 +1125,11 @@ CONFIG_INOTIFY_USER=y # CONFIG_AUTOFS4_FS is not set # CONFIG_FUSE_FS is not set +# +# Caches +# +# CONFIG_FSCACHE is not set + # # CD-ROM/DVD Filesystems # @@ -1087,13 +1149,9 @@ CONFIG_INOTIFY_USER=y CONFIG_PROC_FS=y CONFIG_PROC_SYSCTL=y CONFIG_SYSFS=y -# CONFIG_TMPFS is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set @@ -1112,17 +1170,8 @@ CONFIG_JFFS2_ZLIB=y # CONFIG_JFFS2_LZO is not set CONFIG_JFFS2_RTIME=y # CONFIG_JFFS2_RUBIN is not set -CONFIG_YAFFS_FS=m -CONFIG_YAFFS_YAFFS1=y -# CONFIG_YAFFS_9BYTE_TAGS is not set -# CONFIG_YAFFS_DOES_ECC is not set -CONFIG_YAFFS_YAFFS2=y -CONFIG_YAFFS_AUTO_YAFFS2=y -# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set -# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set -# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set -CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_OMFS_FS is not set @@ -1141,7 +1190,6 @@ CONFIG_LOCKD=m CONFIG_LOCKD_V4=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=m -# CONFIG_SUNRPC_REGISTER_V4 is not set # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set CONFIG_SMB_FS=m @@ -1206,14 +1254,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 # CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set @@ -1221,31 +1274,39 @@ CONFIG_SCHED_DEBUG=y # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SPINLOCK is not set # CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set CONFIG_DEBUG_BUGVERBOSE=y CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_NOMMU_REGIONS is not set # CONFIG_DEBUG_WRITECOUNT is not set # CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set # CONFIG_FRAME_POINTER is not set # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_BACKTRACE_SELF_TEST is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_FAULT_INJECTION is not set - -# -# Tracers -# -# CONFIG_SCHED_TRACER is not set -# CONFIG_CONTEXT_SWITCH_TRACER is not set -# CONFIG_BOOT_TRACER is not set -# CONFIG_DYNAMIC_PRINTK_DEBUG is not set +# CONFIG_PAGE_POISONING is not set 
+CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_BRANCH_PROFILE_NONE is not set +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set @@ -1270,6 +1331,7 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y CONFIG_EARLY_PRINTK=y CONFIG_CPLB_INFO=y CONFIG_ACCESS_CHECK=y +# CONFIG_BFIN_ISRAM_SELF_TEST is not set # # Security options @@ -1278,14 +1340,14 @@ CONFIG_ACCESS_CHECK=y CONFIG_SECURITY=y # CONFIG_SECURITYFS is not set # CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set # CONFIG_SECURITY_FILE_CAPABILITIES is not set -CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 +# CONFIG_SECURITY_TOMOYO is not set CONFIG_CRYPTO=y # # Crypto core or helper # -# CONFIG_CRYPTO_FIPS is not set # CONFIG_CRYPTO_MANAGER is not set # CONFIG_CRYPTO_MANAGER2 is not set # CONFIG_CRYPTO_GF128MUL is not set @@ -1317,11 +1379,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_HMAC is not set # CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set # # Digest # # CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set # CONFIG_CRYPTO_MD4 is not set # CONFIG_CRYPTO_MD5 is not set # CONFIG_CRYPTO_MICHAEL_MIC is not set @@ -1358,6 +1422,7 @@ CONFIG_CRYPTO=y # Compression # # CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set # CONFIG_CRYPTO_LZO is not set # @@ -1365,11 +1430,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set # # Library routines # CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m # CONFIG_CRC16 is not set # CONFIG_CRC_T10DIF is not set @@ -1379,6 +1446,8 @@ CONFIG_CRC32=y # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=m +CONFIG_DECOMPRESS_GZIP=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig index 2908595b67c5..7596cf7673f1 100644 --- a/arch/blackfin/configs/BF537-STAMP_defconfig +++ b/arch/blackfin/configs/BF537-STAMP_defconfig @@ -1,22 +1,27 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.28.10 -# Thu May 21 05:50:01 2009 +# Linux kernel version: 2.6.32.2 # # CONFIG_MMU is not set # CONFIG_FPU is not set CONFIG_RWSEM_GENERIC_SPINLOCK=y # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set CONFIG_BLACKFIN=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_BUG=y CONFIG_ZONE_DMA=y CONFIG_GENERIC_FIND_NEXT_BIT=y -CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_GENERIC_GPIO=y CONFIG_FORCE_MAX_ZONEORDER=14 CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -26,22 +31,41 @@ CONFIG_BROKEN_ON_SMP=y CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y # CONFIG_POSIX_MQUEUE is not set # CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_TASKSTATS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# 
CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set # CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set # CONFIG_SYSFS_DEPRECATED_V2 is not set # CONFIG_RELAY is not set # CONFIG_NAMESPACES is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_ANON_INODES=y @@ -62,6 +86,10 @@ CONFIG_EPOLL=y # CONFIG_TIMERFD is not set # CONFIG_EVENTFD is not set # CONFIG_AIO is not set + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y CONFIG_COMPAT_BRK=y CONFIG_SLAB=y @@ -69,11 +97,15 @@ CONFIG_SLAB=y # CONFIG_SLOB is not set CONFIG_MMAP_ALLOW_UNINITIALIZED=y # CONFIG_PROFILING is not set -# CONFIG_MARKERS is not set CONFIG_HAVE_OPROFILE=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_SLOW_WORK is not set # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y -CONFIG_TINY_SHMEM=y CONFIG_BASE_SMALL=0 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -81,11 +113,8 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set +# CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_INTEGRITY is not set @@ -101,7 +130,6 @@ CONFIG_DEFAULT_AS=y # CONFIG_DEFAULT_CFQ is not set # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="anticipatory" -CONFIG_CLASSIC_RCU=y # CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set @@ -132,15 +160,15 @@ CONFIG_FREEZER=y CONFIG_BF537=y # CONFIG_BF538 is not set # CONFIG_BF539 is not set -# CONFIG_BF542 is not set +# CONFIG_BF542_std is not set # CONFIG_BF542M is not set -# CONFIG_BF544 is not set +# CONFIG_BF544_std is not set # CONFIG_BF544M is not set -# CONFIG_BF547 is not set +# CONFIG_BF547_std is not set # CONFIG_BF547M is not set -# CONFIG_BF548 is not set +# CONFIG_BF548_std is not set # CONFIG_BF548M is not set -# CONFIG_BF549 is not set +# CONFIG_BF549_std is not set # CONFIG_BF549M is not set # CONFIG_BF561 is not set CONFIG_BF_REV_MIN=2 @@ -184,7 +212,8 @@ CONFIG_IRQ_MEM_DMA1=13 CONFIG_IRQ_WATCH=13 CONFIG_IRQ_SPI=10 CONFIG_BFIN537_STAMP=y -# CONFIG_BFIN537_BLUETECHNIX_CM is not set +# CONFIG_BFIN537_BLUETECHNIX_CM_E is not set +# CONFIG_BFIN537_BLUETECHNIX_CM_U is not set # CONFIG_BFIN537_BLUETECHNIX_TCM is not set # CONFIG_PNAV10 is not set # CONFIG_CAMSIG_MINOTAUR is not set @@ -235,7 +264,7 @@ CONFIG_GENERIC_TIME=y CONFIG_GENERIC_CLOCKEVENTS=y # CONFIG_TICKSOURCE_GPTMR0 is not set CONFIG_TICKSOURCE_CORETMR=y -# CONFIG_CYCLES_CLOCKSOURCE is not set +CONFIG_CYCLES_CLOCKSOURCE=y # CONFIG_GPTMR0_CLOCKSOURCE is not set CONFIG_TICK_ONESHOT=y # CONFIG_NO_HZ is not set @@ -287,7 +316,6 @@ CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_RESOURCES_64BIT is not set # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=1 CONFIG_VIRT_TO_BUS=y @@ -296,16 +324,18 @@ CONFIG_BFIN_GPTIMERS=m # CONFIG_DMA_UNCACHED_4M is not set # CONFIG_DMA_UNCACHED_2M is not set CONFIG_DMA_UNCACHED_1M=y +# CONFIG_DMA_UNCACHED_512K is not set +# 
CONFIG_DMA_UNCACHED_256K is not set +# CONFIG_DMA_UNCACHED_128K is not set # CONFIG_DMA_UNCACHED_NONE is not set # # Cache Support # CONFIG_BFIN_ICACHE=y -# CONFIG_BFIN_ICACHE_LOCK is not set +CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_DCACHE=y # CONFIG_BFIN_DCACHE_BANKA is not set -CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_EXTMEM_DCACHEABLE=y CONFIG_BFIN_EXTMEM_WRITEBACK=y # CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set @@ -316,7 +346,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y # CONFIG_MPU is not set # -# Asynchonous Memory Configuration +# Asynchronous Memory Configuration # # @@ -362,6 +392,7 @@ CONFIG_PM=y CONFIG_PM_SLEEP=y CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y +# CONFIG_PM_RUNTIME is not set CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_PM_BFIN_SLEEP_DEEPER=y # CONFIG_PM_BFIN_SLEEP is not set @@ -384,11 +415,6 @@ CONFIG_NET=y CONFIG_PACKET=y # CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y -CONFIG_XFRM=y -# CONFIG_XFRM_USER is not set -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -# CONFIG_XFRM_STATISTICS is not set # CONFIG_NET_KEY is not set CONFIG_INET=y # CONFIG_IP_MULTICAST is not set @@ -412,7 +438,6 @@ CONFIG_IP_PNP=y # CONFIG_INET_XFRM_MODE_BEET is not set # CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set -CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" @@ -423,6 +448,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_NETFILTER is not set # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set @@ -436,14 +462,34 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing # # CONFIG_NET_PKTGEN is not set # CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m + +# +# CAN Device Drivers +# +# CONFIG_CAN_VCAN is not set +CONFIG_CAN_DEV=m +# CONFIG_CAN_CALC_BITTIMING is not set +CONFIG_CAN_BFIN=m +# CONFIG_CAN_SJA1000 is not set + +# +# CAN USB interfaces +# +# CONFIG_CAN_EMS_USB is not set +# CONFIG_CAN_DEBUG_DEVICES is not set CONFIG_IRDA=m # @@ -483,13 +529,8 @@ CONFIG_SIR_BFIN_DMA=y # # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set -# CONFIG_PHONET is not set -CONFIG_WIRELESS=y -# CONFIG_CFG80211 is not set -CONFIG_WIRELESS_OLD_REGULATORY=y -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -510,6 +551,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_CONNECTOR is not set CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set # CONFIG_MTD_CONCAT is not set CONFIG_MTD_PARTITIONS=y # CONFIG_MTD_REDBOOT_PARTS is not set @@ -568,6 +610,7 @@ CONFIG_MTD_PHYSMAP=m # # CONFIG_MTD_DATAFLASH is not set # CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -582,6 +625,11 @@ CONFIG_MTD_PHYSMAP=m # CONFIG_MTD_NAND is not set # CONFIG_MTD_ONENAND is not set +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + # # UBI - Unsorted block images # @@ -599,10 +647,20 @@ CONFIG_BLK_DEV_RAM_SIZE=4096 # CONFIG_ATA_OVER_ETH is not set # CONFIG_BLK_DEV_HD is not set CONFIG_MISC_DEVICES=y -# 
CONFIG_EEPROM_93CX6 is not set +# CONFIG_AD525X_DPOT is not set # CONFIG_ICS932S401 is not set # CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_ISL29003 is not set # CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -637,6 +695,9 @@ CONFIG_SMSC_PHY=y # CONFIG_BROADCOM_PHY is not set # CONFIG_ICPLUS_PHY is not set # CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set # CONFIG_FIXED_PHY is not set # CONFIG_MDIO_BITBANG is not set CONFIG_NET_ETHERNET=y @@ -647,9 +708,12 @@ CONFIG_BFIN_TX_DESC_NUM=10 CONFIG_BFIN_RX_DESC_NUM=20 # CONFIG_BFIN_MAC_RMII is not set # CONFIG_SMC91X is not set -# CONFIG_SMSC911X is not set # CONFIG_DM9000 is not set # CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_ADF702X is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set @@ -658,15 +722,16 @@ CONFIG_BFIN_RX_DESC_NUM=20 # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set +# CONFIG_WLAN is not set # -# Wireless LAN +# Enable WiMAX (Networking options) to see the WiMAX drivers # -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set -# CONFIG_IWLWIFI_LEDS is not set # CONFIG_WAN is not set # CONFIG_PPP is not set # CONFIG_SLIP is not set @@ -701,7 +766,10 @@ CONFIG_INPUT_EVDEV=m # CONFIG_INPUT_TOUCHSCREEN is not set CONFIG_INPUT_MISC=y # CONFIG_INPUT_UINPUT is not set -CONFIG_CONFIG_INPUT_PCF8574=m +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_PCF8574 is not set # # Hardware I/O ports @@ -712,16 +780,13 @@ CONFIG_CONFIG_INPUT_PCF8574=m # # Character devices # -# CONFIG_AD9960 is not set CONFIG_BFIN_DMA_INTERFACE=m # CONFIG_BFIN_PPI is not set # CONFIG_BFIN_PPIFCD is not set # CONFIG_BFIN_SIMPLE_TIMER is not set # CONFIG_BFIN_SPI_ADC is not set CONFIG_BFIN_SPORT=m -# CONFIG_BFIN_TIMER_LATENCY is not set # CONFIG_BFIN_TWI_LCD is not set -CONFIG_SIMPLE_GPIO=m # CONFIG_VT is not set # CONFIG_DEVKMEM is not set CONFIG_BFIN_JTAG_COMM=m @@ -735,6 +800,7 @@ CONFIG_BFIN_JTAG_COMM=m # # Non-8250 serial port support # +# CONFIG_SERIAL_MAX3100 is not set CONFIG_SERIAL_BFIN=y CONFIG_SERIAL_BFIN_CONSOLE=y CONFIG_SERIAL_BFIN_DMA=y @@ -746,17 +812,8 @@ CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_BFIN_SPORT is not set CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set - -# -# CAN, the car bus and industrial fieldbus -# -CONFIG_CAN4LINUX=y - -# -# linux embedded drivers -# -CONFIG_CAN_BLACKFIN=m # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set # CONFIG_R3964 is not set @@ -764,6 +821,7 @@ CONFIG_CAN_BLACKFIN=m # CONFIG_TCG_TPM is not set CONFIG_I2C=m CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m CONFIG_I2C_HELPER_AUTO=y @@ -796,14 +854,6 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_EEPROM_AT24 is not set 
-CONFIG_SENSORS_AD5252=m -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_SENSORS_PCF8574 is not set -# CONFIG_PCF8575 is not set -# CONFIG_SENSORS_PCA9539 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set @@ -820,13 +870,18 @@ CONFIG_SPI_BFIN=y # CONFIG_SPI_BFIN_LOCK is not set # CONFIG_SPI_BFIN_SPORT is not set # CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set # # SPI Protocol Masters # -# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y CONFIG_GPIOLIB=y # CONFIG_DEBUG_GPIO is not set @@ -842,6 +897,7 @@ CONFIG_GPIO_SYSFS=y # CONFIG_GPIO_MAX732X is not set # CONFIG_GPIO_PCA953X is not set # CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_ADP5588 is not set # # PCI GPIO expanders: @@ -852,11 +908,15 @@ CONFIG_GPIO_SYSFS=y # # CONFIG_GPIO_MAX7301 is not set # CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set CONFIG_WATCHDOG=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -878,26 +938,18 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set +# CONFIG_UCB1400_CORE is not set +# CONFIG_TPS65010 is not set # CONFIG_MFD_TMIO is not set # CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set # CONFIG_REGULATOR is not set - -# -# Multimedia devices -# - -# -# Multimedia core support -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_VIDEO_MEDIA is not set - -# -# Multimedia drivers -# -# CONFIG_DAB is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support @@ -929,9 +981,6 @@ CONFIG_FB_CFB_IMAGEBLIT=m # CONFIG_FB_BFIN_T350MCQB is not set # CONFIG_FB_BFIN_LQ035Q1 is not set CONFIG_FB_BF537_LQ035=m -CONFIG_LQ035_SLAVE_ADDR=0x58 -# CONFIG_FB_BFIN_LANDSCAPE is not set -# CONFIG_FB_BFIN_BGR is not set CONFIG_FB_BFIN_7393=m CONFIG_NTSC=y # CONFIG_PAL is not set @@ -946,15 +995,18 @@ CONFIG_ADV7393_1XMEM=y # CONFIG_FB_VIRTUAL is not set # CONFIG_FB_METRONOME is not set # CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_LMS283GF05 is not set # CONFIG_LCD_LTV350QV is not set # CONFIG_LCD_ILI9320 is not set # CONFIG_LCD_TDO24M is not set # CONFIG_LCD_VGG2432A4 is not set # CONFIG_LCD_PLATFORM is not set CONFIG_BACKLIGHT_CLASS_DEVICE=m -CONFIG_BACKLIGHT_CORGI=m +CONFIG_BACKLIGHT_GENERIC=m +# CONFIG_BACKLIGHT_ADP8870 is not set # # Display device support @@ -963,19 +1015,27 @@ CONFIG_BACKLIGHT_CORGI=m # CONFIG_LOGO is not set CONFIG_SOUND=m CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y CONFIG_SND=m CONFIG_SND_TIMER=m CONFIG_SND_PCM=m +CONFIG_SND_JACK=y # CONFIG_SND_SEQUENCER is not set CONFIG_SND_OSSEMUL=y CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m CONFIG_SND_PCM_OSS_PLUGINS=y +# CONFIG_SND_HRTIMER is not set # CONFIG_SND_DYNAMIC_MINORS is not set CONFIG_SND_SUPPORT_OLD_API=y CONFIG_SND_VERBOSE_PROCFS=y # CONFIG_SND_VERBOSE_PRINTK is not set # CONFIG_SND_DEBUG is not set +# CONFIG_SND_RAWMIDI_SEQ 
is not set +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set CONFIG_SND_DRIVERS=y # CONFIG_SND_DUMMY is not set # CONFIG_SND_MTPAV is not set @@ -986,13 +1046,6 @@ CONFIG_SND_SPI=y # # ALSA Blackfin devices # -CONFIG_SND_BLACKFIN_AD1836=m -CONFIG_SND_BLACKFIN_AD1836_TDM=y -# CONFIG_SND_BLACKFIN_AD1836_I2S is not set -CONFIG_SND_BLACKFIN_AD1836_MULSUB=y -# CONFIG_SND_BLACKFIN_AD1836_5P1 is not set -CONFIG_SND_BLACKFIN_SPORT=0 -CONFIG_SND_BLACKFIN_SPI_PFBIT=4 CONFIG_SND_BFIN_SPORT=0 CONFIG_SND_BFIN_AD73322=m CONFIG_SND_BFIN_AD73322_SPORT0_SE=10 @@ -1003,16 +1056,20 @@ CONFIG_SND_SOC_AC97_BUS=y CONFIG_SND_BF5XX_I2S=m # CONFIG_SND_BF5XX_SOC_SSM2602 is not set CONFIG_SND_BF5XX_SOC_AD73311=m +# CONFIG_SND_BF5XX_SOC_ADAU1371 is not set +# CONFIG_SND_BF5XX_SOC_ADAU1761 is not set CONFIG_SND_BFIN_AD73311_SE=4 +# CONFIG_SND_BF5XX_TDM is not set CONFIG_SND_BF5XX_AC97=m CONFIG_SND_BF5XX_MMAP_SUPPORT=y # CONFIG_SND_BF5XX_MULTICHAN_SUPPORT is not set +# CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set +CONFIG_SND_BF5XX_SOC_AD1980=m CONFIG_SND_BF5XX_SOC_SPORT=m CONFIG_SND_BF5XX_SOC_I2S=m CONFIG_SND_BF5XX_SOC_AC97=m -CONFIG_SND_BF5XX_SOC_AD1980=m CONFIG_SND_BF5XX_SPORT_NUM=0 -# CONFIG_SND_BF5XX_HAVE_COLD_RESET is not set +CONFIG_SND_SOC_I2C_AND_SPI=m # CONFIG_SND_SOC_ALL_CODECS is not set CONFIG_SND_SOC_AD1980=m CONFIG_SND_SOC_AD73311=m @@ -1020,14 +1077,12 @@ CONFIG_SND_SOC_AD73311=m CONFIG_AC97_BUS=m CONFIG_HID_SUPPORT=y CONFIG_HID=y -# CONFIG_HID_DEBUG is not set # CONFIG_HIDRAW is not set # CONFIG_HID_PID is not set # # Special HID drivers # -CONFIG_HID_COMPAT=y # CONFIG_USB_SUPPORT is not set # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set @@ -1064,6 +1119,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_S35390A is not set # CONFIG_RTC_DRV_FM3130 is not set # CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set # # SPI RTC drivers @@ -1075,6 +1131,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_R9701 is not set # CONFIG_RTC_DRV_RS5C348 is not set # CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set # # Platform RTC drivers @@ -1095,9 +1152,20 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_BFIN=y # CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set + +# +# TI VLYNQ +# # CONFIG_STAGING is not set +# +# Firmware Drivers +# +# CONFIG_FIRMWARE_MEMMAP is not set +# CONFIG_SIGMA is not set + # # File systems # @@ -1107,9 +1175,13 @@ CONFIG_RTC_DRV_BFIN=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set # CONFIG_FS_POSIX_ACL is not set -CONFIG_FILE_LOCKING=y # CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y # CONFIG_DNOTIFY is not set CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y @@ -1118,6 +1190,11 @@ CONFIG_INOTIFY_USER=y # CONFIG_AUTOFS4_FS is not set # CONFIG_FUSE_FS is not set +# +# Caches +# +# CONFIG_FSCACHE is not set + # # CD-ROM/DVD Filesystems # @@ -1137,13 +1214,9 @@ CONFIG_INOTIFY_USER=y CONFIG_PROC_FS=y CONFIG_PROC_SYSCTL=y CONFIG_SYSFS=y -# CONFIG_TMPFS is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set @@ -1162,17 +1235,8 @@ CONFIG_JFFS2_ZLIB=y # CONFIG_JFFS2_LZO is not set CONFIG_JFFS2_RTIME=y # CONFIG_JFFS2_RUBIN is not set 
-CONFIG_YAFFS_FS=m -CONFIG_YAFFS_YAFFS1=y -# CONFIG_YAFFS_9BYTE_TAGS is not set -# CONFIG_YAFFS_DOES_ECC is not set -CONFIG_YAFFS_YAFFS2=y -CONFIG_YAFFS_AUTO_YAFFS2=y -# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set -# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set -# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set -CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_OMFS_FS is not set @@ -1191,7 +1255,6 @@ CONFIG_LOCKD=m CONFIG_LOCKD_V4=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=m -# CONFIG_SUNRPC_REGISTER_V4 is not set # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set CONFIG_SMB_FS=m @@ -1256,14 +1319,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 # CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set @@ -1271,31 +1339,39 @@ CONFIG_SCHED_DEBUG=y # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SPINLOCK is not set # CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set CONFIG_DEBUG_BUGVERBOSE=y CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_NOMMU_REGIONS is not set # CONFIG_DEBUG_WRITECOUNT is not set # CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set # CONFIG_FRAME_POINTER is not set # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_BACKTRACE_SELF_TEST is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_FAULT_INJECTION is not set - -# -# Tracers -# -# CONFIG_SCHED_TRACER is not set -# CONFIG_CONTEXT_SWITCH_TRACER is not set -# CONFIG_BOOT_TRACER is not set -# CONFIG_DYNAMIC_PRINTK_DEBUG is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_BRANCH_PROFILE_NONE is not set +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set @@ -1320,6 +1396,7 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y CONFIG_EARLY_PRINTK=y CONFIG_CPLB_INFO=y CONFIG_ACCESS_CHECK=y +# CONFIG_BFIN_ISRAM_SELF_TEST is not set # # Security options @@ -1328,14 +1405,14 @@ CONFIG_ACCESS_CHECK=y CONFIG_SECURITY=y # CONFIG_SECURITYFS is not set # CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set # CONFIG_SECURITY_FILE_CAPABILITIES is not set -CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 +# CONFIG_SECURITY_TOMOYO is not set CONFIG_CRYPTO=y # # Crypto core or helper # -# CONFIG_CRYPTO_FIPS is not set # CONFIG_CRYPTO_MANAGER is not set # 
CONFIG_CRYPTO_MANAGER2 is not set # CONFIG_CRYPTO_GF128MUL is not set @@ -1367,11 +1444,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_HMAC is not set # CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set # # Digest # # CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set # CONFIG_CRYPTO_MD4 is not set # CONFIG_CRYPTO_MD5 is not set # CONFIG_CRYPTO_MICHAEL_MIC is not set @@ -1408,6 +1487,7 @@ CONFIG_CRYPTO=y # Compression # # CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set # CONFIG_CRYPTO_LZO is not set # @@ -1415,11 +1495,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set # # Library routines # CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m # CONFIG_CRC16 is not set # CONFIG_CRC_T10DIF is not set @@ -1429,6 +1511,8 @@ CONFIG_CRC32=y # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=m +CONFIG_DECOMPRESS_GZIP=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig index 09ea2499555e..bc1871d89fd5 100644 --- a/arch/blackfin/configs/BF538-EZKIT_defconfig +++ b/arch/blackfin/configs/BF538-EZKIT_defconfig @@ -1,22 +1,27 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.28.10 -# Thu May 21 05:50:01 2009 +# Linux kernel version: 2.6.32.2 # # CONFIG_MMU is not set # CONFIG_FPU is not set CONFIG_RWSEM_GENERIC_SPINLOCK=y # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set CONFIG_BLACKFIN=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_BUG=y CONFIG_ZONE_DMA=y CONFIG_GENERIC_FIND_NEXT_BIT=y -CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_GENERIC_GPIO=y CONFIG_FORCE_MAX_ZONEORDER=14 CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -26,22 +31,41 @@ CONFIG_BROKEN_ON_SMP=y CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y # CONFIG_POSIX_MQUEUE is not set # CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_TASKSTATS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set # CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set # CONFIG_SYSFS_DEPRECATED_V2 is not set # CONFIG_RELAY is not set # CONFIG_NAMESPACES is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +# CONFIG_RD_BZIP2 is not set +# CONFIG_RD_LZMA is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_ANON_INODES=y @@ -62,6 +86,10 @@ CONFIG_EPOLL=y # CONFIG_TIMERFD is not set # CONFIG_EVENTFD is not set # CONFIG_AIO is not set + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y CONFIG_COMPAT_BRK=y CONFIG_SLAB=y @@ -69,11 +97,15 @@ CONFIG_SLAB=y # CONFIG_SLOB is not set CONFIG_MMAP_ALLOW_UNINITIALIZED=y # CONFIG_PROFILING is not set 
-# CONFIG_MARKERS is not set CONFIG_HAVE_OPROFILE=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_SLOW_WORK is not set # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y -CONFIG_TINY_SHMEM=y CONFIG_BASE_SMALL=0 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -81,11 +113,8 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set +# CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_INTEGRITY is not set @@ -101,7 +130,6 @@ CONFIG_DEFAULT_AS=y # CONFIG_DEFAULT_CFQ is not set # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="anticipatory" -CONFIG_CLASSIC_RCU=y # CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set @@ -132,15 +160,15 @@ CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_BF537 is not set CONFIG_BF538=y # CONFIG_BF539 is not set -# CONFIG_BF542 is not set +# CONFIG_BF542_std is not set # CONFIG_BF542M is not set -# CONFIG_BF544 is not set +# CONFIG_BF544_std is not set # CONFIG_BF544M is not set -# CONFIG_BF547 is not set +# CONFIG_BF547_std is not set # CONFIG_BF547M is not set -# CONFIG_BF548 is not set +# CONFIG_BF548_std is not set # CONFIG_BF548M is not set -# CONFIG_BF549 is not set +# CONFIG_BF549_std is not set # CONFIG_BF549M is not set # CONFIG_BF561 is not set CONFIG_BF_REV_MIN=4 @@ -246,7 +274,7 @@ CONFIG_GENERIC_TIME=y CONFIG_GENERIC_CLOCKEVENTS=y # CONFIG_TICKSOURCE_GPTMR0 is not set CONFIG_TICKSOURCE_CORETMR=y -# CONFIG_CYCLES_CLOCKSOURCE is not set +CONFIG_CYCLES_CLOCKSOURCE=y # CONFIG_GPTMR0_CLOCKSOURCE is not set CONFIG_TICK_ONESHOT=y # CONFIG_NO_HZ is not set @@ -298,7 +326,6 @@ CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_RESOURCES_64BIT is not set # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=1 CONFIG_VIRT_TO_BUS=y @@ -307,16 +334,18 @@ CONFIG_BFIN_GPTIMERS=m # CONFIG_DMA_UNCACHED_4M is not set # CONFIG_DMA_UNCACHED_2M is not set CONFIG_DMA_UNCACHED_1M=y +# CONFIG_DMA_UNCACHED_512K is not set +# CONFIG_DMA_UNCACHED_256K is not set +# CONFIG_DMA_UNCACHED_128K is not set # CONFIG_DMA_UNCACHED_NONE is not set # # Cache Support # CONFIG_BFIN_ICACHE=y -# CONFIG_BFIN_ICACHE_LOCK is not set +CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_DCACHE=y # CONFIG_BFIN_DCACHE_BANKA is not set -CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_EXTMEM_DCACHEABLE=y CONFIG_BFIN_EXTMEM_WRITEBACK=y # CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set @@ -327,7 +356,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y # CONFIG_MPU is not set # -# Asynchonous Memory Configuration +# Asynchronous Memory Configuration # # @@ -383,11 +412,6 @@ CONFIG_NET=y CONFIG_PACKET=y # CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y -CONFIG_XFRM=y -# CONFIG_XFRM_USER is not set -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -# CONFIG_XFRM_STATISTICS is not set # CONFIG_NET_KEY is not set CONFIG_INET=y # CONFIG_IP_MULTICAST is not set @@ -411,7 +435,6 @@ CONFIG_IP_PNP=y # CONFIG_INET_XFRM_MODE_BEET is not set # CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set -CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" @@ -422,6 +445,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_NETFILTER is not set # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set +# CONFIG_RDS is 
not set # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set @@ -435,14 +459,34 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing # # CONFIG_NET_PKTGEN is not set # CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m + +# +# CAN Device Drivers +# +# CONFIG_CAN_VCAN is not set +CONFIG_CAN_DEV=m +# CONFIG_CAN_CALC_BITTIMING is not set +CONFIG_CAN_BFIN=m +# CONFIG_CAN_SJA1000 is not set + +# +# CAN USB interfaces +# +# CONFIG_CAN_EMS_USB is not set +# CONFIG_CAN_DEBUG_DEVICES is not set CONFIG_IRDA=m # @@ -481,13 +525,8 @@ CONFIG_SIR_BFIN_DMA=y # # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set -# CONFIG_PHONET is not set -CONFIG_WIRELESS=y -# CONFIG_CFG80211 is not set -CONFIG_WIRELESS_OLD_REGULATORY=y -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -508,6 +547,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_CONNECTOR is not set CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set # CONFIG_MTD_CONCAT is not set CONFIG_MTD_PARTITIONS=y # CONFIG_MTD_REDBOOT_PARTS is not set @@ -566,6 +606,7 @@ CONFIG_MTD_PHYSMAP=m # # CONFIG_MTD_DATAFLASH is not set # CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -581,17 +622,17 @@ CONFIG_MTD_NAND=m # CONFIG_MTD_NAND_VERIFY_WRITE is not set # CONFIG_MTD_NAND_ECC_SMC is not set # CONFIG_MTD_NAND_MUSEUM_IDS is not set -CONFIG_MTD_NAND_BFIN=m -CONFIG_BFIN_NAND_BASE=0x20212000 -CONFIG_BFIN_NAND_CLE=2 -CONFIG_BFIN_NAND_ALE=1 -CONFIG_BFIN_NAND_READY=3 CONFIG_MTD_NAND_IDS=m # CONFIG_MTD_NAND_DISKONCHIP is not set # CONFIG_MTD_NAND_NANDSIM is not set # CONFIG_MTD_NAND_PLATFORM is not set # CONFIG_MTD_ONENAND is not set +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + # # UBI - Unsorted block images # @@ -643,14 +684,20 @@ CONFIG_SMSC_PHY=y # CONFIG_BROADCOM_PHY is not set # CONFIG_ICPLUS_PHY is not set # CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set # CONFIG_FIXED_PHY is not set # CONFIG_MDIO_BITBANG is not set CONFIG_NET_ETHERNET=y CONFIG_MII=y CONFIG_SMC91X=y -# CONFIG_SMSC911X is not set # CONFIG_DM9000 is not set # CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_ADF702X is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set @@ -659,15 +706,16 @@ CONFIG_SMC91X=y # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set +# CONFIG_WLAN is not set # -# Wireless LAN +# Enable WiMAX (Networking options) to see the WiMAX drivers # -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set -# CONFIG_IWLWIFI_LEDS is not set # CONFIG_WAN is not set # CONFIG_PPP is not set # CONFIG_SLIP is not set @@ -700,14 +748,17 @@ CONFIG_INPUT_EVDEV=m # 
CONFIG_INPUT_JOYSTICK is not set # CONFIG_INPUT_TABLET is not set CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set # CONFIG_TOUCHSCREEN_AD7877 is not set # CONFIG_TOUCHSCREEN_AD7879_I2C is not set CONFIG_TOUCHSCREEN_AD7879_SPI=y CONFIG_TOUCHSCREEN_AD7879=y -# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_EETI is not set # CONFIG_TOUCHSCREEN_FUJITSU is not set # CONFIG_TOUCHSCREEN_GUNZE is not set # CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set # CONFIG_TOUCHSCREEN_MTOUCH is not set # CONFIG_TOUCHSCREEN_INEXIO is not set # CONFIG_TOUCHSCREEN_MK712 is not set @@ -715,9 +766,13 @@ CONFIG_TOUCHSCREEN_AD7879=y # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set # CONFIG_TOUCHSCREEN_TOUCHWIN is not set # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set CONFIG_INPUT_MISC=y # CONFIG_INPUT_UINPUT is not set -# CONFIG_CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_PCF8574 is not set # # Hardware I/O ports @@ -728,16 +783,13 @@ CONFIG_INPUT_MISC=y # # Character devices # -# CONFIG_AD9960 is not set CONFIG_BFIN_DMA_INTERFACE=m # CONFIG_BFIN_PPI is not set # CONFIG_BFIN_PPIFCD is not set # CONFIG_BFIN_SIMPLE_TIMER is not set # CONFIG_BFIN_SPI_ADC is not set CONFIG_BFIN_SPORT=m -# CONFIG_BFIN_TIMER_LATENCY is not set # CONFIG_BFIN_TWI_LCD is not set -CONFIG_SIMPLE_GPIO=m # CONFIG_VT is not set # CONFIG_DEVKMEM is not set CONFIG_BFIN_JTAG_COMM=m @@ -751,6 +803,7 @@ CONFIG_BFIN_JTAG_COMM=m # # Non-8250 serial port support # +# CONFIG_SERIAL_MAX3100 is not set CONFIG_SERIAL_BFIN=y CONFIG_SERIAL_BFIN_CONSOLE=y CONFIG_SERIAL_BFIN_DMA=y @@ -765,12 +818,8 @@ CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_BFIN_SPORT is not set CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set - -# -# CAN, the car bus and industrial fieldbus -# -# CONFIG_CAN4LINUX is not set # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set # CONFIG_R3964 is not set @@ -778,6 +827,7 @@ CONFIG_UNIX98_PTYS=y # CONFIG_TCG_TPM is not set CONFIG_I2C=m CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y # CONFIG_I2C_CHARDEV is not set CONFIG_I2C_HELPER_AUTO=y @@ -810,14 +860,6 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_EEPROM_AT24 is not set -# CONFIG_SENSORS_AD5252 is not set -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_SENSORS_PCF8574 is not set -# CONFIG_PCF8575 is not set -# CONFIG_SENSORS_PCA9539 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set @@ -834,13 +876,18 @@ CONFIG_SPI_BFIN=y # CONFIG_SPI_BFIN_LOCK is not set # CONFIG_SPI_BFIN_SPORT is not set # CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set # # SPI Protocol Masters # -# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y CONFIG_GPIOLIB=y # CONFIG_DEBUG_GPIO is not set @@ -856,6 +903,7 @@ CONFIG_GPIO_SYSFS=y # CONFIG_GPIO_MAX732X is not set # CONFIG_GPIO_PCA953X is not set # CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_ADP5588 is not set # # PCI GPIO expanders: @@ -866,11 +914,15 @@ CONFIG_GPIO_SYSFS=y # # CONFIG_GPIO_MAX7301 is 
not set # CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set CONFIG_WATCHDOG=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -892,26 +944,17 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set +# CONFIG_TPS65010 is not set # CONFIG_MFD_TMIO is not set # CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set # CONFIG_REGULATOR is not set - -# -# Multimedia devices -# - -# -# Multimedia core support -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_VIDEO_MEDIA is not set - -# -# Multimedia drivers -# -# CONFIG_DAB is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support @@ -947,6 +990,7 @@ CONFIG_FB_BFIN_LQ035Q1=m # CONFIG_FB_VIRTUAL is not set # CONFIG_FB_METRONOME is not set # CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set # CONFIG_BACKLIGHT_LCD_SUPPORT is not set # @@ -957,14 +1001,12 @@ CONFIG_FB_BFIN_LQ035Q1=m # CONFIG_SOUND is not set CONFIG_HID_SUPPORT=y CONFIG_HID=y -# CONFIG_HID_DEBUG is not set # CONFIG_HIDRAW is not set # CONFIG_HID_PID is not set # # Special HID drivers # -CONFIG_HID_COMPAT=y # CONFIG_USB_SUPPORT is not set # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set @@ -1001,6 +1043,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_S35390A is not set # CONFIG_RTC_DRV_FM3130 is not set # CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set # # SPI RTC drivers @@ -1012,6 +1055,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_R9701 is not set # CONFIG_RTC_DRV_RS5C348 is not set # CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set # # Platform RTC drivers @@ -1032,9 +1076,20 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_BFIN=y # CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set + +# +# TI VLYNQ +# # CONFIG_STAGING is not set +# +# Firmware Drivers +# +# CONFIG_FIRMWARE_MEMMAP is not set +# CONFIG_SIGMA is not set + # # File systems # @@ -1044,9 +1099,13 @@ CONFIG_RTC_DRV_BFIN=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set # CONFIG_FS_POSIX_ACL is not set -CONFIG_FILE_LOCKING=y # CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y # CONFIG_DNOTIFY is not set CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y @@ -1055,6 +1114,11 @@ CONFIG_INOTIFY_USER=y # CONFIG_AUTOFS4_FS is not set # CONFIG_FUSE_FS is not set +# +# Caches +# +# CONFIG_FSCACHE is not set + # # CD-ROM/DVD Filesystems # @@ -1074,13 +1138,9 @@ CONFIG_INOTIFY_USER=y CONFIG_PROC_FS=y CONFIG_PROC_SYSCTL=y CONFIG_SYSFS=y -# CONFIG_TMPFS is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set @@ -1099,17 +1159,8 @@ CONFIG_JFFS2_ZLIB=y # CONFIG_JFFS2_LZO is not set CONFIG_JFFS2_RTIME=y # CONFIG_JFFS2_RUBIN is not set -CONFIG_YAFFS_FS=m -CONFIG_YAFFS_YAFFS1=y -# CONFIG_YAFFS_9BYTE_TAGS is not set -# CONFIG_YAFFS_DOES_ECC is not set -CONFIG_YAFFS_YAFFS2=y -CONFIG_YAFFS_AUTO_YAFFS2=y -# 
CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set -# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set -# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set -CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_OMFS_FS is not set @@ -1128,7 +1179,6 @@ CONFIG_LOCKD=m CONFIG_LOCKD_V4=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=m -# CONFIG_SUNRPC_REGISTER_V4 is not set # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set CONFIG_SMB_FS=m @@ -1193,14 +1243,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 # CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set @@ -1208,31 +1263,39 @@ CONFIG_SCHED_DEBUG=y # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SPINLOCK is not set # CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set CONFIG_DEBUG_BUGVERBOSE=y CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_NOMMU_REGIONS is not set # CONFIG_DEBUG_WRITECOUNT is not set # CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set # CONFIG_FRAME_POINTER is not set # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_BACKTRACE_SELF_TEST is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_FAULT_INJECTION is not set - -# -# Tracers -# -# CONFIG_SCHED_TRACER is not set -# CONFIG_CONTEXT_SWITCH_TRACER is not set -# CONFIG_BOOT_TRACER is not set -# CONFIG_DYNAMIC_PRINTK_DEBUG is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_BRANCH_PROFILE_NONE is not set +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set @@ -1257,6 +1320,7 @@ CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y CONFIG_EARLY_PRINTK=y CONFIG_CPLB_INFO=y CONFIG_ACCESS_CHECK=y +# CONFIG_BFIN_ISRAM_SELF_TEST is not set # # Security options @@ -1265,14 +1329,14 @@ CONFIG_ACCESS_CHECK=y CONFIG_SECURITY=y # CONFIG_SECURITYFS is not set # CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set # CONFIG_SECURITY_FILE_CAPABILITIES is not set -CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 +# CONFIG_SECURITY_TOMOYO is not set CONFIG_CRYPTO=y # # Crypto core or helper # -# CONFIG_CRYPTO_FIPS is not set # CONFIG_CRYPTO_MANAGER is not set # CONFIG_CRYPTO_MANAGER2 is not set # CONFIG_CRYPTO_GF128MUL is not set @@ -1304,11 +1368,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_HMAC is not set # CONFIG_CRYPTO_XCBC is not set +# 
CONFIG_CRYPTO_VMAC is not set # # Digest # # CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set # CONFIG_CRYPTO_MD4 is not set # CONFIG_CRYPTO_MD5 is not set # CONFIG_CRYPTO_MICHAEL_MIC is not set @@ -1345,6 +1411,7 @@ CONFIG_CRYPTO=y # Compression # # CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set # CONFIG_CRYPTO_LZO is not set # @@ -1352,11 +1419,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set # # Library routines # CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m # CONFIG_CRC16 is not set # CONFIG_CRC_T10DIF is not set @@ -1366,6 +1435,8 @@ CONFIG_CRC32=y # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=m +CONFIG_DECOMPRESS_GZIP=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig index eb3e98b6f3f0..ca309cfc6ac4 100644 --- a/arch/blackfin/configs/BF548-EZKIT_defconfig +++ b/arch/blackfin/configs/BF548-EZKIT_defconfig @@ -1,7 +1,6 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.31.5 -# Mon Nov 2 22:02:56 2009 +# Linux kernel version: 2.6.32.2 # # CONFIG_MMU is not set # CONFIG_FPU is not set @@ -12,7 +11,6 @@ CONFIG_GENERIC_CSUM=y CONFIG_GENERIC_BUG=y CONFIG_ZONE_DMA=y CONFIG_GENERIC_FIND_NEXT_BIT=y -CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_IRQ_PROBE=y CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y @@ -49,11 +47,12 @@ CONFIG_SYSVIPC_SYSCTL=y # # RCU Subsystem # -CONFIG_CLASSIC_RCU=y -# CONFIG_TREE_RCU is not set -# CONFIG_PREEMPT_RCU is not set +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set # CONFIG_TREE_RCU_TRACE is not set -# CONFIG_PREEMPT_RCU_TRACE is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 @@ -89,24 +88,23 @@ CONFIG_EPOLL=y # CONFIG_AIO is not set # -# Performance Counters +# Kernel Performance Events And Counters # CONFIG_VM_EVENT_COUNTERS=y -# CONFIG_STRIP_ASM_SYMS is not set CONFIG_COMPAT_BRK=y CONFIG_SLAB=y # CONFIG_SLUB is not set # CONFIG_SLOB is not set CONFIG_MMAP_ALLOW_UNINITIALIZED=y # CONFIG_PROFILING is not set -# CONFIG_MARKERS is not set CONFIG_HAVE_OPROFILE=y # # GCOV-based kernel profiling # # CONFIG_GCOV_KERNEL is not set -# CONFIG_SLOW_WORK is not set +CONFIG_SLOW_WORK=y +# CONFIG_SLOW_WORK_DEBUG is not set # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y CONFIG_BASE_SMALL=0 @@ -163,15 +161,15 @@ CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_BF537 is not set # CONFIG_BF538 is not set # CONFIG_BF539 is not set -# CONFIG_BF542 is not set +# CONFIG_BF542_std is not set # CONFIG_BF542M is not set -# CONFIG_BF544 is not set +# CONFIG_BF544_std is not set # CONFIG_BF544M is not set -# CONFIG_BF547 is not set +# CONFIG_BF547_std is not set # CONFIG_BF547M is not set CONFIG_BF548_std=y # CONFIG_BF548M is not set -# CONFIG_BF549 is not set +# CONFIG_BF549_std is not set # CONFIG_BF549M is not set # CONFIG_BF561 is not set CONFIG_BF_REV_MIN=0 @@ -185,7 +183,6 @@ CONFIG_BF_REV_0_2=y # CONFIG_BF_REV_0_6 is not set # CONFIG_BF_REV_ANY is not set # CONFIG_BF_REV_NONE is not set -CONFIG_BF54x=y CONFIG_IRQ_PLL_WAKEUP=7 CONFIG_IRQ_RTC=8 CONFIG_IRQ_SPORT0_RX=9 @@ -221,6 +218,8 @@ CONFIG_IRQ_SPI1=10 CONFIG_IRQ_SPI2=10 CONFIG_IRQ_TWI0=11 CONFIG_IRQ_TWI1=11 +CONFIG_BF548=y +CONFIG_BF54x=y CONFIG_BFIN548_EZKIT=y # 
CONFIG_BFIN548_BLUETECHNIX_CM is not set @@ -387,12 +386,14 @@ CONFIG_SPLIT_PTLOCK_CPUS=4 # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=1 CONFIG_VIRT_TO_BUS=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 CONFIG_BFIN_GPTIMERS=m # CONFIG_DMA_UNCACHED_4M is not set CONFIG_DMA_UNCACHED_2M=y # CONFIG_DMA_UNCACHED_1M is not set +# CONFIG_DMA_UNCACHED_512K is not set +# CONFIG_DMA_UNCACHED_256K is not set +# CONFIG_DMA_UNCACHED_128K is not set # CONFIG_DMA_UNCACHED_NONE is not set # @@ -505,6 +506,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_NETFILTER is not set # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set @@ -528,7 +530,24 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # # CONFIG_NET_PKTGEN is not set # CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m + +# +# CAN Device Drivers +# +# CONFIG_CAN_VCAN is not set +CONFIG_CAN_DEV=m +# CONFIG_CAN_CALC_BITTIMING is not set +CONFIG_CAN_BFIN=m +# CONFIG_CAN_SJA1000 is not set + +# +# CAN USB interfaces +# +# CONFIG_CAN_EMS_USB is not set +# CONFIG_CAN_DEBUG_DEVICES is not set CONFIG_IRDA=m # @@ -663,6 +682,7 @@ CONFIG_MTD_PHYSMAP=y # CONFIG_MTD_DATAFLASH is not set CONFIG_MTD_M25P80=y CONFIG_M25PXX_USE_FAST_READ=y +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -711,10 +731,10 @@ CONFIG_BLK_DEV_RAM_SIZE=4096 # CONFIG_ATA_OVER_ETH is not set # CONFIG_BLK_DEV_HD is not set CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set # CONFIG_ICS932S401 is not set # CONFIG_ENCLOSURE_SERVICES is not set # CONFIG_ISL29003 is not set -# CONFIG_AD525X_DPOT is not set # CONFIG_C2PORT is not set # @@ -767,7 +787,8 @@ CONFIG_SCSI_WAIT_SCAN=m # CONFIG_SCSI_OSD_INITIATOR is not set CONFIG_ATA=y # CONFIG_ATA_NONSTANDARD is not set -CONFIG_SATA_PMP=y +CONFIG_ATA_VERBOSE_ERROR=y +# CONFIG_SATA_PMP is not set CONFIG_ATA_SFF=y # CONFIG_SATA_MV is not set # CONFIG_PATA_PLATFORM is not set @@ -808,6 +829,7 @@ CONFIG_MII=y # CONFIG_ETHOC is not set CONFIG_SMSC911X=y # CONFIG_DNET is not set +# CONFIG_ADF702X is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set @@ -818,12 +840,10 @@ CONFIG_SMSC911X=y # CONFIG_B44 is not set # CONFIG_KS8842 is not set # CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set - -# -# Wireless LAN -# +CONFIG_WLAN=y # CONFIG_WLAN_PRE80211 is not set CONFIG_WLAN_80211=y CONFIG_LIBERTAS=m @@ -877,10 +897,12 @@ CONFIG_INPUT_EVBUG=m CONFIG_INPUT_KEYBOARD=y # CONFIG_KEYBOARD_ADP5588 is not set # CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_QT2160 is not set CONFIG_KEYBOARD_BFIN=y # CONFIG_KEYBOARD_LKKBD is not set # CONFIG_KEYBOARD_GPIO is not set # CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_MAX7359 is not set # CONFIG_KEYBOARD_NEWTON is not set # CONFIG_KEYBOARD_OPENCORES is not set # CONFIG_KEYBOARD_STOWAWAY is not set @@ -900,6 +922,7 @@ CONFIG_TOUCHSCREEN_AD7877=m # CONFIG_TOUCHSCREEN_GUNZE is not set # CONFIG_TOUCHSCREEN_ELO is not set # CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set # CONFIG_TOUCHSCREEN_MTOUCH is not set # CONFIG_TOUCHSCREEN_INEXIO is not set # CONFIG_TOUCHSCREEN_MK712 is not set @@ -910,7 +933,6 @@ CONFIG_TOUCHSCREEN_AD7877=m # CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set # 
CONFIG_TOUCHSCREEN_TOUCHIT213 is not set # CONFIG_TOUCHSCREEN_TSC2007 is not set -# CONFIG_TOUCHSCREEN_W90X900 is not set CONFIG_INPUT_MISC=y # CONFIG_INPUT_ATI_REMOTE is not set # CONFIG_INPUT_ATI_REMOTE2 is not set @@ -976,11 +998,6 @@ CONFIG_UNIX98_PTYS=y # CONFIG_LEGACY_PTYS is not set CONFIG_BFIN_OTP=y # CONFIG_BFIN_OTP_WRITE_ENABLE is not set - -# -# CAN, the car bus and industrial fieldbus -# -# CONFIG_CAN4LINUX is not set # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set # CONFIG_R3964 is not set @@ -988,6 +1005,7 @@ CONFIG_BFIN_OTP=y # CONFIG_TCG_TPM is not set CONFIG_I2C=y CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_HELPER_AUTO=y @@ -1021,9 +1039,6 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_SENSORS_PCF8574 is not set -# CONFIG_PCF8575 is not set -# CONFIG_SENSORS_PCA9539 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set @@ -1078,11 +1093,15 @@ CONFIG_GPIO_SYSFS=y # # CONFIG_GPIO_MAX7301 is not set # CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set CONFIG_WATCHDOG=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -1116,8 +1135,10 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_PMIC_DA903X is not set # CONFIG_PMIC_ADP5520 is not set # CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set # CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set # CONFIG_AB3100_CORE is not set # CONFIG_EZX_PCAP is not set # CONFIG_REGULATOR is not set @@ -1192,6 +1213,7 @@ CONFIG_LOGO=y CONFIG_LOGO_BLACKFIN_CLUT224=y CONFIG_SOUND=y CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y CONFIG_SND=y CONFIG_SND_TIMER=y CONFIG_SND_PCM=y @@ -1245,7 +1267,6 @@ CONFIG_SND_SOC_AD1980=y CONFIG_AC97_BUS=y CONFIG_HID_SUPPORT=y CONFIG_HID=y -# CONFIG_HID_DEBUG is not set # CONFIG_HIDRAW is not set # @@ -1268,6 +1289,7 @@ CONFIG_HID_CYPRESS=y CONFIG_HID_EZKEY=y # CONFIG_HID_KYE is not set CONFIG_HID_GYRATION=y +# CONFIG_HID_TWINHAN is not set # CONFIG_HID_KENSINGTON is not set CONFIG_HID_LOGITECH=y # CONFIG_LOGITECH_FF is not set @@ -1422,10 +1444,11 @@ CONFIG_MMC_BLOCK_BOUNCE=y # MMC/SD/SDIO Host Controller Drivers # # CONFIG_MMC_SDHCI is not set +# CONFIG_MMC_AT91 is not set +# CONFIG_MMC_ATMELMCI is not set +# CONFIG_MMC_SPI is not set CONFIG_SDH_BFIN=y # CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND is not set -# CONFIG_SDH_BFIN_ENABLE_SDIO_IRQ is not set -# CONFIG_MMC_SPI is not set # CONFIG_MEMSTICK is not set # CONFIG_NEW_LEDS is not set # CONFIG_ACCESSIBILITY is not set @@ -1472,6 +1495,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_R9701 is not set # CONFIG_RTC_DRV_RS5C348 is not set # CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set # # Platform RTC drivers @@ -1522,6 +1546,7 @@ CONFIG_FS_MBCACHE=y # CONFIG_XFS_FS is not set # CONFIG_OCFS2_FS is not set # CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set CONFIG_FILE_LOCKING=y CONFIG_FSNOTIFY=y # CONFIG_DNOTIFY is not set @@ -1563,7 +1588,6 @@ CONFIG_NTFS_RW=y CONFIG_PROC_FS=y CONFIG_PROC_SYSCTL=y CONFIG_SYSFS=y -# CONFIG_TMPFS is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set CONFIG_MISC_FILESYSTEMS=y @@ -1595,7 +1619,6 @@ CONFIG_JFFS2_RTIME=y # CONFIG_ROMFS_FS is not set # CONFIG_SYSV_FS is not set # 
CONFIG_UFS_FS is not set -# CONFIG_NILFS2_FS is not set CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=m CONFIG_NFS_V3=y @@ -1680,6 +1703,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 # CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set @@ -1714,12 +1738,14 @@ CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set # CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set # CONFIG_FRAME_POINTER is not set # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_BACKTRACE_SELF_TEST is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_FAULT_INJECTION is not set # CONFIG_PAGE_POISONING is not set CONFIG_HAVE_FUNCTION_TRACER=y @@ -1730,7 +1756,6 @@ CONFIG_TRACING_SUPPORT=y # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set -# CONFIG_KMEMCHECK is not set # CONFIG_DEBUG_STACKOVERFLOW is not set # CONFIG_DEBUG_STACK_USAGE is not set CONFIG_DEBUG_VERBOSE=y @@ -1766,7 +1791,6 @@ CONFIG_CRYPTO=y # # Crypto core or helper # -# CONFIG_CRYPTO_FIPS is not set # CONFIG_CRYPTO_MANAGER is not set # CONFIG_CRYPTO_MANAGER2 is not set # CONFIG_CRYPTO_GF128MUL is not set @@ -1798,11 +1822,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_HMAC is not set # CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set # # Digest # # CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set # CONFIG_CRYPTO_MD4 is not set # CONFIG_CRYPTO_MD5 is not set # CONFIG_CRYPTO_MICHAEL_MIC is not set diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig index b9b0f93d0bd3..6a776ce75e9c 100644 --- a/arch/blackfin/configs/BF561-ACVILON_defconfig +++ b/arch/blackfin/configs/BF561-ACVILON_defconfig @@ -114,7 +114,7 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set CONFIG_BLOCK=y -CONFIG_LBDAF=y +# CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_INTEGRITY is not set @@ -1486,19 +1486,10 @@ CONFIG_DEBUG_INFO=y CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -# CONFIG_FUNCTION_TRACER is not set -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_SCHED_TRACER is not set -# CONFIG_ENABLE_DEFAULT_TRACERS is not set -# CONFIG_BOOT_TRACER is not set -CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_FTRACE is not set +# CONFIG_BRANCH_PROFILE_NONE is not set # CONFIG_PROFILE_ANNOTATED_BRANCHES is not set # CONFIG_PROFILE_ALL_BRANCHES is not set -# CONFIG_STACK_TRACER is not set -# CONFIG_KMEMTRACE is not set -# CONFIG_WORKQUEUE_TRACER is not set -# CONFIG_BLK_DEV_IO_TRACE is not set # CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig index e3ecdcc3e76b..792ff0938835 100644 --- a/arch/blackfin/configs/BF561-EZKIT_defconfig +++ b/arch/blackfin/configs/BF561-EZKIT_defconfig @@ -1,7 +1,6 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.31.5 -# Mon Nov 2 21:59:31 2009 +# Linux kernel version: 2.6.32.2 # # CONFIG_MMU is not set # CONFIG_FPU is not set @@ -12,7 +11,6 @@ CONFIG_GENERIC_CSUM=y CONFIG_GENERIC_BUG=y CONFIG_ZONE_DMA=y CONFIG_GENERIC_FIND_NEXT_BIT=y 
-CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_IRQ_PROBE=y CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y @@ -49,11 +47,12 @@ CONFIG_SYSVIPC_SYSCTL=y # # RCU Subsystem # -CONFIG_CLASSIC_RCU=y -# CONFIG_TREE_RCU is not set -# CONFIG_PREEMPT_RCU is not set +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set # CONFIG_TREE_RCU_TRACE is not set -# CONFIG_PREEMPT_RCU_TRACE is not set CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 @@ -89,17 +88,15 @@ CONFIG_EPOLL=y # CONFIG_AIO is not set # -# Performance Counters +# Kernel Performance Events And Counters # CONFIG_VM_EVENT_COUNTERS=y -# CONFIG_STRIP_ASM_SYMS is not set CONFIG_COMPAT_BRK=y CONFIG_SLAB=y # CONFIG_SLUB is not set # CONFIG_SLOB is not set CONFIG_MMAP_ALLOW_UNINITIALIZED=y # CONFIG_PROFILING is not set -# CONFIG_MARKERS is not set CONFIG_HAVE_OPROFILE=y # @@ -163,15 +160,15 @@ CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_BF537 is not set # CONFIG_BF538 is not set # CONFIG_BF539 is not set -# CONFIG_BF542 is not set +# CONFIG_BF542_std is not set # CONFIG_BF542M is not set -# CONFIG_BF544 is not set +# CONFIG_BF544_std is not set # CONFIG_BF544M is not set -# CONFIG_BF547 is not set +# CONFIG_BF547_std is not set # CONFIG_BF547M is not set -# CONFIG_BF548 is not set +# CONFIG_BF548_std is not set # CONFIG_BF548M is not set -# CONFIG_BF549 is not set +# CONFIG_BF549_std is not set # CONFIG_BF549M is not set CONFIG_BF561=y # CONFIG_SMP is not set @@ -180,9 +177,9 @@ CONFIG_BF_REV_MAX=5 # CONFIG_BF_REV_0_0 is not set # CONFIG_BF_REV_0_1 is not set # CONFIG_BF_REV_0_2 is not set -# CONFIG_BF_REV_0_3 is not set +CONFIG_BF_REV_0_3=y # CONFIG_BF_REV_0_4 is not set -CONFIG_BF_REV_0_5=y +# CONFIG_BF_REV_0_5 is not set # CONFIG_BF_REV_0_6 is not set # CONFIG_BF_REV_ANY is not set # CONFIG_BF_REV_NONE is not set @@ -298,7 +295,7 @@ CONFIG_GENERIC_TIME=y CONFIG_GENERIC_CLOCKEVENTS=y # CONFIG_TICKSOURCE_GPTMR0 is not set CONFIG_TICKSOURCE_CORETMR=y -# CONFIG_CYCLES_CLOCKSOURCE is not set +CONFIG_CYCLES_CLOCKSOURCE=y # CONFIG_GPTMR0_CLOCKSOURCE is not set CONFIG_TICK_ONESHOT=y # CONFIG_NO_HZ is not set @@ -353,12 +350,14 @@ CONFIG_SPLIT_PTLOCK_CPUS=4 # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=1 CONFIG_VIRT_TO_BUS=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 CONFIG_BFIN_GPTIMERS=m # CONFIG_DMA_UNCACHED_4M is not set # CONFIG_DMA_UNCACHED_2M is not set CONFIG_DMA_UNCACHED_1M=y +# CONFIG_DMA_UNCACHED_512K is not set +# CONFIG_DMA_UNCACHED_256K is not set +# CONFIG_DMA_UNCACHED_128K is not set # CONFIG_DMA_UNCACHED_NONE is not set # @@ -370,9 +369,11 @@ CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_DCACHE=y # CONFIG_BFIN_DCACHE_BANKA is not set CONFIG_BFIN_EXTMEM_DCACHEABLE=y -CONFIG_BFIN_EXTMEM_WRITEBACK=y -# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set -# CONFIG_BFIN_L2_DCACHEABLE is not set +# CONFIG_BFIN_EXTMEM_WRITEBACK is not set +CONFIG_BFIN_EXTMEM_WRITETHROUGH=y +CONFIG_BFIN_L2_DCACHEABLE=y +# CONFIG_BFIN_L2_WRITEBACK is not set +CONFIG_BFIN_L2_WRITETHROUGH=y # # Memory Protection Unit @@ -472,6 +473,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_NETFILTER is not set # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set @@ -613,6 +615,7 @@ CONFIG_MTD_PHYSMAP=m # # CONFIG_MTD_DATAFLASH is not set # CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # 
CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -685,6 +688,7 @@ CONFIG_SMC91X=y # CONFIG_ETHOC is not set # CONFIG_SMSC911X is not set # CONFIG_DNET is not set +# CONFIG_ADF702X is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set @@ -695,14 +699,10 @@ CONFIG_SMC91X=y # CONFIG_B44 is not set # CONFIG_KS8842 is not set # CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set - -# -# Wireless LAN -# -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set +# CONFIG_WLAN is not set # # Enable WiMAX (Networking options) to see the WiMAX drivers @@ -782,11 +782,6 @@ CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set - -# -# CAN, the car bus and industrial fieldbus -# -# CONFIG_CAN4LINUX is not set # CONFIG_IPMI_HANDLER is not set # CONFIG_HW_RANDOM is not set # CONFIG_R3964 is not set @@ -838,11 +833,15 @@ CONFIG_GPIO_SYSFS=y # # CONFIG_GPIO_MAX7301 is not set # CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set # CONFIG_HWMON is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set CONFIG_WATCHDOG=y # CONFIG_WATCHDOG_NOWAYOUT is not set @@ -865,6 +864,7 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set # CONFIG_MFD_TMIO is not set +# CONFIG_MFD_MC13783 is not set # CONFIG_EZX_PCAP is not set # CONFIG_REGULATOR is not set # CONFIG_MEDIA_SUPPORT is not set @@ -884,7 +884,6 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_SOUND is not set CONFIG_HID_SUPPORT=y CONFIG_HID=m -# CONFIG_HID_DEBUG is not set # CONFIG_HIDRAW is not set # CONFIG_HID_PID is not set @@ -923,6 +922,7 @@ CONFIG_HID=m # CONFIG_XFS_FS is not set # CONFIG_OCFS2_FS is not set # CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set CONFIG_FILE_LOCKING=y CONFIG_FSNOTIFY=y # CONFIG_DNOTIFY is not set @@ -957,7 +957,6 @@ CONFIG_INOTIFY_USER=y CONFIG_PROC_FS=y CONFIG_PROC_SYSCTL=y CONFIG_SYSFS=y -# CONFIG_TMPFS is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set CONFIG_MISC_FILESYSTEMS=y @@ -989,7 +988,6 @@ CONFIG_JFFS2_RTIME=y # CONFIG_ROMFS_FS is not set # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set -# CONFIG_NILFS2_FS is not set CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=m CONFIG_NFS_V3=y @@ -1064,6 +1062,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 # CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set @@ -1098,26 +1097,24 @@ CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set # CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set # CONFIG_FRAME_POINTER is not set # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set # CONFIG_BACKTRACE_SELF_TEST is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set # CONFIG_FAULT_INJECTION is not set # CONFIG_PAGE_POISONING is not set CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y CONFIG_TRACING_SUPPORT=y # CONFIG_FTRACE is not set -# CONFIG_BRANCH_PROFILE_NONE is not set -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -# CONFIG_PROFILE_ALL_BRANCHES is not set # 
CONFIG_DYNAMIC_DEBUG is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set -# CONFIG_KMEMCHECK is not set # CONFIG_DEBUG_STACKOVERFLOW is not set # CONFIG_DEBUG_STACK_USAGE is not set CONFIG_DEBUG_VERBOSE=y @@ -1153,7 +1150,6 @@ CONFIG_CRYPTO=y # # Crypto core or helper # -# CONFIG_CRYPTO_FIPS is not set # CONFIG_CRYPTO_MANAGER is not set # CONFIG_CRYPTO_MANAGER2 is not set # CONFIG_CRYPTO_GF128MUL is not set @@ -1185,11 +1181,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_HMAC is not set # CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set # # Digest # # CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set # CONFIG_CRYPTO_MD4 is not set # CONFIG_CRYPTO_MD5 is not set # CONFIG_CRYPTO_MICHAEL_MIC is not set diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig index bc7fae3d8b83..ed0a7ebeb85c 100644 --- a/arch/blackfin/configs/H8606_defconfig +++ b/arch/blackfin/configs/H8606_defconfig @@ -834,13 +834,6 @@ CONFIG_SND_VERBOSE_PROCFS=y # # ALSA Blackfin devices # -CONFIG_SND_BLACKFIN_AD1836=m -CONFIG_SND_BLACKFIN_AD1836_TDM=y -# CONFIG_SND_BLACKFIN_AD1836_I2S is not set -CONFIG_SND_BLACKFIN_AD1836_MULSUB=y -# CONFIG_SND_BLACKFIN_AD1836_5P1 is not set -CONFIG_SND_BLACKFIN_SPORT=0 -CONFIG_SND_BLACKFIN_SPI_PFBIT=4 # CONFIG_SND_BFIN_AD73311 is not set # diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig index 67d12768602a..ad58fede1f41 100644 --- a/arch/blackfin/configs/PNAV-10_defconfig +++ b/arch/blackfin/configs/PNAV-10_defconfig @@ -1,21 +1,27 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.28.10 +# Linux kernel version: 2.6.32.2 # # CONFIG_MMU is not set # CONFIG_FPU is not set CONFIG_RWSEM_GENERIC_SPINLOCK=y # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set CONFIG_BLACKFIN=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_BUG=y CONFIG_ZONE_DMA=y CONFIG_GENERIC_FIND_NEXT_BIT=y -CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_GENERIC_GPIO=y CONFIG_FORCE_MAX_ZONEORDER=14 CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -25,16 +31,32 @@ CONFIG_BROKEN_ON_SMP=y CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y # CONFIG_POSIX_MQUEUE is not set # CONFIG_BSD_PROCESS_ACCT is not set # CONFIG_TASKSTATS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set # CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set # CONFIG_SYSFS_DEPRECATED_V2 is not set # CONFIG_RELAY is not set # CONFIG_NAMESPACES is not set @@ -58,6 +80,10 @@ CONFIG_SIGNALFD=y CONFIG_TIMERFD=y CONFIG_EVENTFD=y # CONFIG_AIO is not set + +# +# Kernel Performance Events And Counters +# CONFIG_VM_EVENT_COUNTERS=y CONFIG_COMPAT_BRK=y CONFIG_SLAB=y @@ -65,11 +91,14 @@ CONFIG_SLAB=y # CONFIG_SLOB is not set 
CONFIG_MMAP_ALLOW_UNINITIALIZED=y # CONFIG_PROFILING is not set -# CONFIG_MARKERS is not set CONFIG_HAVE_OPROFILE=y + +# +# GCOV-based kernel profiling +# +# CONFIG_SLOW_WORK is not set # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set CONFIG_SLABINFO=y -CONFIG_TINY_SHMEM=y CONFIG_BASE_SMALL=0 CONFIG_MODULES=y # CONFIG_MODULE_FORCE_LOAD is not set @@ -77,11 +106,8 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set +# CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set # CONFIG_BLK_DEV_INTEGRITY is not set @@ -97,7 +123,6 @@ CONFIG_DEFAULT_AS=y # CONFIG_DEFAULT_CFQ is not set # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="anticipatory" -CONFIG_CLASSIC_RCU=y # CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set @@ -128,15 +153,15 @@ CONFIG_PREEMPT_VOLUNTARY=y CONFIG_BF537=y # CONFIG_BF538 is not set # CONFIG_BF539 is not set -# CONFIG_BF542 is not set +# CONFIG_BF542_std is not set # CONFIG_BF542M is not set -# CONFIG_BF544 is not set +# CONFIG_BF544_std is not set # CONFIG_BF544M is not set -# CONFIG_BF547 is not set +# CONFIG_BF547_std is not set # CONFIG_BF547M is not set -# CONFIG_BF548 is not set +# CONFIG_BF548_std is not set # CONFIG_BF548M is not set -# CONFIG_BF549 is not set +# CONFIG_BF549_std is not set # CONFIG_BF549M is not set # CONFIG_BF561 is not set CONFIG_BF_REV_MIN=2 @@ -180,7 +205,8 @@ CONFIG_IRQ_MEM_DMA1=13 CONFIG_IRQ_WATCH=13 CONFIG_IRQ_SPI=10 # CONFIG_BFIN537_STAMP is not set -# CONFIG_BFIN537_BLUETECHNIX_CM is not set +# CONFIG_BFIN537_BLUETECHNIX_CM_E is not set +# CONFIG_BFIN537_BLUETECHNIX_CM_U is not set # CONFIG_BFIN537_BLUETECHNIX_TCM is not set CONFIG_PNAV10=y # CONFIG_CAMSIG_MINOTAUR is not set @@ -282,7 +308,6 @@ CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -# CONFIG_RESOURCES_64BIT is not set # CONFIG_PHYS_ADDR_T_64BIT is not set CONFIG_ZONE_DMA_FLAG=1 CONFIG_VIRT_TO_BUS=y @@ -291,16 +316,18 @@ CONFIG_BFIN_GPTIMERS=y # CONFIG_DMA_UNCACHED_4M is not set # CONFIG_DMA_UNCACHED_2M is not set CONFIG_DMA_UNCACHED_1M=y +# CONFIG_DMA_UNCACHED_512K is not set +# CONFIG_DMA_UNCACHED_256K is not set +# CONFIG_DMA_UNCACHED_128K is not set # CONFIG_DMA_UNCACHED_NONE is not set # # Cache Support # CONFIG_BFIN_ICACHE=y -# CONFIG_BFIN_ICACHE_LOCK is not set +CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_DCACHE=y # CONFIG_BFIN_DCACHE_BANKA is not set -CONFIG_BFIN_EXTMEM_ICACHEABLE=y CONFIG_BFIN_EXTMEM_DCACHEABLE=y CONFIG_BFIN_EXTMEM_WRITEBACK=y # CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set @@ -311,7 +338,7 @@ CONFIG_BFIN_EXTMEM_WRITEBACK=y # CONFIG_MPU is not set # -# Asynchonous Memory Configuration +# Asynchronous Memory Configuration # # @@ -367,11 +394,6 @@ CONFIG_NET=y CONFIG_PACKET=y # CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y -CONFIG_XFRM=y -# CONFIG_XFRM_USER is not set -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -# CONFIG_XFRM_STATISTICS is not set # CONFIG_NET_KEY is not set CONFIG_INET=y # CONFIG_IP_MULTICAST is not set @@ -395,7 +417,6 @@ CONFIG_IP_PNP=y # CONFIG_INET_XFRM_MODE_BEET is not set # CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set -CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_CUBIC=y CONFIG_DEFAULT_TCP_CONG="cubic" @@ -406,6 +427,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_NETFILTER 
is not set # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set @@ -419,7 +441,10 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing @@ -430,13 +455,8 @@ CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set -# CONFIG_PHONET is not set -CONFIG_WIRELESS=y -# CONFIG_CFG80211 is not set -CONFIG_WIRELESS_OLD_REGULATORY=y -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -455,6 +475,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_CONNECTOR is not set CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set # CONFIG_MTD_CONCAT is not set CONFIG_MTD_PARTITIONS=y # CONFIG_MTD_REDBOOT_PARTS is not set @@ -506,6 +527,7 @@ CONFIG_MTD_UCLINUX=y # # CONFIG_MTD_DATAFLASH is not set # CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -521,17 +543,17 @@ CONFIG_MTD_NAND=y # CONFIG_MTD_NAND_VERIFY_WRITE is not set # CONFIG_MTD_NAND_ECC_SMC is not set # CONFIG_MTD_NAND_MUSEUM_IDS is not set -CONFIG_MTD_NAND_BFIN=y -CONFIG_BFIN_NAND_BASE=0x20100000 -CONFIG_BFIN_NAND_CLE=2 -CONFIG_BFIN_NAND_ALE=1 -CONFIG_BFIN_NAND_READY=44 CONFIG_MTD_NAND_IDS=y # CONFIG_MTD_NAND_DISKONCHIP is not set # CONFIG_MTD_NAND_NANDSIM is not set # CONFIG_MTD_NAND_PLATFORM is not set # CONFIG_MTD_ONENAND is not set +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + # # UBI - Unsorted block images # @@ -549,10 +571,20 @@ CONFIG_BLK_DEV_RAM_SIZE=4096 # CONFIG_ATA_OVER_ETH is not set # CONFIG_BLK_DEV_HD is not set CONFIG_MISC_DEVICES=y -# CONFIG_EEPROM_93CX6 is not set +# CONFIG_AD525X_DPOT is not set # CONFIG_ICS932S401 is not set # CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_ISL29003 is not set # CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -587,6 +619,9 @@ CONFIG_PHYLIB=y # CONFIG_BROADCOM_PHY is not set # CONFIG_ICPLUS_PHY is not set # CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set # CONFIG_FIXED_PHY is not set # CONFIG_MDIO_BITBANG is not set CONFIG_NET_ETHERNET=y @@ -597,9 +632,12 @@ CONFIG_BFIN_TX_DESC_NUM=100 CONFIG_BFIN_RX_DESC_NUM=100 CONFIG_BFIN_MAC_RMII=y # CONFIG_SMC91X is not set -# CONFIG_SMSC911X is not set # CONFIG_DM9000 is not set # CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_ADF702X is not set # CONFIG_IBM_NEW_EMAC_ZMII is not set # CONFIG_IBM_NEW_EMAC_RGMII is not set # CONFIG_IBM_NEW_EMAC_TAH is not set @@ -608,15 +646,16 @@ CONFIG_BFIN_MAC_RMII=y # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set # CONFIG_NETDEV_1000 is not set # 
CONFIG_NETDEV_10000 is not set +# CONFIG_WLAN is not set # -# Wireless LAN +# Enable WiMAX (Networking options) to see the WiMAX drivers # -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set -# CONFIG_IWLWIFI_LEDS is not set # CONFIG_WAN is not set # CONFIG_PPP is not set # CONFIG_SLIP is not set @@ -649,14 +688,17 @@ CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_JOYSTICK is not set # CONFIG_INPUT_TABLET is not set CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set CONFIG_TOUCHSCREEN_AD7877=y # CONFIG_TOUCHSCREEN_AD7879_I2C is not set # CONFIG_TOUCHSCREEN_AD7879_SPI is not set # CONFIG_TOUCHSCREEN_AD7879 is not set -# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_EETI is not set # CONFIG_TOUCHSCREEN_FUJITSU is not set # CONFIG_TOUCHSCREEN_GUNZE is not set # CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set # CONFIG_TOUCHSCREEN_MTOUCH is not set # CONFIG_TOUCHSCREEN_INEXIO is not set # CONFIG_TOUCHSCREEN_MK712 is not set @@ -665,6 +707,7 @@ CONFIG_TOUCHSCREEN_AD7877=y # CONFIG_TOUCHSCREEN_TOUCHWIN is not set # CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set CONFIG_INPUT_MISC=y # CONFIG_INPUT_ATI_REMOTE is not set # CONFIG_INPUT_ATI_REMOTE2 is not set @@ -673,7 +716,9 @@ CONFIG_INPUT_MISC=y # CONFIG_INPUT_YEALINK is not set # CONFIG_INPUT_CM109 is not set CONFIG_INPUT_UINPUT=y -# CONFIG_CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_PCF8574 is not set # # Hardware I/O ports @@ -684,16 +729,13 @@ CONFIG_INPUT_UINPUT=y # # Character devices # -# CONFIG_AD9960 is not set CONFIG_BFIN_DMA_INTERFACE=m # CONFIG_BFIN_PPI is not set # CONFIG_BFIN_PPIFCD is not set # CONFIG_BFIN_SIMPLE_TIMER is not set # CONFIG_BFIN_SPI_ADC is not set CONFIG_BFIN_SPORT=y -# CONFIG_BFIN_TIMER_LATENCY is not set # CONFIG_BFIN_TWI_LCD is not set -# CONFIG_SIMPLE_GPIO is not set # CONFIG_VT is not set CONFIG_DEVKMEM=y # CONFIG_BFIN_JTAG_COMM is not set @@ -707,6 +749,7 @@ CONFIG_DEVKMEM=y # # Non-8250 serial port support # +# CONFIG_SERIAL_MAX3100 is not set CONFIG_SERIAL_BFIN=y CONFIG_SERIAL_BFIN_CONSOLE=y CONFIG_SERIAL_BFIN_DMA=y @@ -719,24 +762,17 @@ CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_BFIN_SPORT is not set CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set # CONFIG_LEGACY_PTYS is not set - -# -# CAN, the car bus and industrial fieldbus -# -CONFIG_CAN4LINUX=y - -# -# linux embedded drivers -# -CONFIG_CAN_BLACKFIN=m # CONFIG_IPMI_HANDLER is not set CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_TIMERIOMEM is not set # CONFIG_R3964 is not set # CONFIG_RAW_DRIVER is not set # CONFIG_TCG_TPM is not set CONFIG_I2C=y CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_HELPER_AUTO=y @@ -769,14 +805,6 @@ CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 # Miscellaneous I2C Chip support # # CONFIG_DS1682 is not set -# CONFIG_EEPROM_AT24 is not set -# CONFIG_SENSORS_AD5252 is not set -# CONFIG_EEPROM_LEGACY is not set -CONFIG_SENSORS_PCF8574=m -# CONFIG_PCF8575 is not set -# CONFIG_SENSORS_PCA9539 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set @@ -792,20 +820,29 @@ CONFIG_SPI_BFIN=y # CONFIG_SPI_BFIN_LOCK is not set # CONFIG_SPI_BFIN_SPORT is not set # CONFIG_SPI_BITBANG is not set +# 
CONFIG_SPI_GPIO is not set # # SPI Protocol Masters # -# CONFIG_EEPROM_AT25 is not set # CONFIG_SPI_SPIDEV is not set # CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y # CONFIG_GPIOLIB is not set # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set CONFIG_HWMON=y # CONFIG_HWMON_VID is not set -# CONFIG_SENSORS_AD5252 is not set +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# # CONFIG_SENSORS_AD7414 is not set # CONFIG_SENSORS_AD7418 is not set # CONFIG_SENSORS_ADCXX is not set @@ -818,11 +855,13 @@ CONFIG_HWMON=y # CONFIG_SENSORS_ADT7462 is not set # CONFIG_SENSORS_ADT7470 is not set # CONFIG_SENSORS_ADT7473 is not set +# CONFIG_SENSORS_ADT7475 is not set # CONFIG_SENSORS_ATXP1 is not set # CONFIG_SENSORS_DS1621 is not set # CONFIG_SENSORS_F71805F is not set # CONFIG_SENSORS_F71882FG is not set # CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_G760A is not set # CONFIG_SENSORS_GL518SM is not set # CONFIG_SENSORS_GL520SM is not set # CONFIG_SENSORS_IT87 is not set @@ -838,17 +877,24 @@ CONFIG_HWMON=y # CONFIG_SENSORS_LM90 is not set # CONFIG_SENSORS_LM92 is not set # CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LM95241 is not set # CONFIG_SENSORS_MAX1111 is not set # CONFIG_SENSORS_MAX1619 is not set # CONFIG_SENSORS_MAX6650 is not set # CONFIG_SENSORS_PC87360 is not set # CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_SENSORS_SHT15 is not set # CONFIG_SENSORS_DME1737 is not set # CONFIG_SENSORS_SMSC47M1 is not set # CONFIG_SENSORS_SMSC47M192 is not set # CONFIG_SENSORS_SMSC47B397 is not set # CONFIG_SENSORS_ADS7828 is not set # CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set # CONFIG_SENSORS_VT1211 is not set # CONFIG_SENSORS_W83781D is not set # CONFIG_SENSORS_W83791D is not set @@ -858,9 +904,8 @@ CONFIG_HWMON=y # CONFIG_SENSORS_W83L786NG is not set # CONFIG_SENSORS_W83627HF is not set # CONFIG_SENSORS_W83627EHF is not set -# CONFIG_HWMON_DEBUG_CHIP is not set +# CONFIG_SENSORS_LIS3_SPI is not set # CONFIG_THERMAL is not set -# CONFIG_THERMAL_HWMON is not set # CONFIG_WATCHDOG is not set CONFIG_SSB_POSSIBLE=y @@ -875,28 +920,19 @@ CONFIG_SSB_POSSIBLE=y # CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set +# CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set # CONFIG_PMIC_DA903X is not set # CONFIG_PMIC_ADP5520 is not set # CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set # CONFIG_REGULATOR is not set - -# -# Multimedia devices -# - -# -# Multimedia core support -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_VIDEO_MEDIA is not set - -# -# Multimedia drivers -# -CONFIG_DAB=y +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support @@ -928,24 +964,24 @@ CONFIG_FB_CFB_IMAGEBLIT=y # CONFIG_FB_BFIN_T350MCQB is not set # CONFIG_FB_BFIN_LQ035Q1 is not set CONFIG_FB_BF537_LQ035=y -CONFIG_LQ035_SLAVE_ADDR=0x58 -CONFIG_FB_BFIN_LANDSCAPE=y -# CONFIG_FB_BFIN_BGR is not set # CONFIG_FB_BFIN_7393 is not set # CONFIG_FB_HITACHI_TX09 is not set # CONFIG_FB_S1D13XXX is not set # CONFIG_FB_VIRTUAL is not set # CONFIG_FB_METRONOME is not set # CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set 
CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_LCD_LMS283GF05 is not set # CONFIG_LCD_LTV350QV is not set # CONFIG_LCD_ILI9320 is not set # CONFIG_LCD_TDO24M is not set # CONFIG_LCD_VGG2432A4 is not set # CONFIG_LCD_PLATFORM is not set CONFIG_BACKLIGHT_CLASS_DEVICE=y -# CONFIG_BACKLIGHT_CORGI is not set +CONFIG_BACKLIGHT_GENERIC=y +# CONFIG_BACKLIGHT_ADP8870 is not set # # Display device support @@ -954,6 +990,7 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y # CONFIG_LOGO is not set CONFIG_SOUND=y CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y CONFIG_SND=m # CONFIG_SND_SEQUENCER is not set # CONFIG_SND_MIXER_OSS is not set @@ -963,6 +1000,11 @@ CONFIG_SND=m # CONFIG_SND_VERBOSE_PROCFS is not set # CONFIG_SND_VERBOSE_PRINTK is not set # CONFIG_SND_DEBUG is not set +# CONFIG_SND_RAWMIDI_SEQ is not set +# CONFIG_SND_OPL3_LIB_SEQ is not set +# CONFIG_SND_OPL4_LIB_SEQ is not set +# CONFIG_SND_SBAWE_SEQ is not set +# CONFIG_SND_EMU10K1_SEQ is not set CONFIG_SND_DRIVERS=y # CONFIG_SND_DUMMY is not set # CONFIG_SND_MTPAV is not set @@ -973,7 +1015,6 @@ CONFIG_SND_SPI=y # # ALSA Blackfin devices # -# CONFIG_SND_BLACKFIN_AD1836 is not set # CONFIG_SND_BFIN_AD73322 is not set # CONFIG_SND_SOC is not set CONFIG_SOUND_PRIME=y @@ -993,9 +1034,13 @@ CONFIG_USB_ARCH_HAS_HCD=y # # -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed; +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may # # CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set # CONFIG_NEW_LEDS is not set @@ -1031,6 +1076,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_S35390A is not set # CONFIG_RTC_DRV_FM3130 is not set # CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set # # SPI RTC drivers @@ -1042,6 +1088,7 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_R9701 is not set # CONFIG_RTC_DRV_RS5C348 is not set # CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set # # Platform RTC drivers @@ -1062,9 +1109,20 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_BFIN=y # CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set + +# +# TI VLYNQ +# # CONFIG_STAGING is not set +# +# Firmware Drivers +# +# CONFIG_FIRMWARE_MEMMAP is not set +# CONFIG_SIGMA is not set + # # File systems # @@ -1078,9 +1136,13 @@ CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set # CONFIG_FS_POSIX_ACL is not set -CONFIG_FILE_LOCKING=y # CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y # CONFIG_DNOTIFY is not set CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y @@ -1089,6 +1151,11 @@ CONFIG_INOTIFY_USER=y # CONFIG_AUTOFS4_FS is not set # CONFIG_FUSE_FS is not set +# +# Caches +# +# CONFIG_FSCACHE is not set + # # CD-ROM/DVD Filesystems # @@ -1108,13 +1175,9 @@ CONFIG_INOTIFY_USER=y CONFIG_PROC_FS=y CONFIG_PROC_SYSCTL=y CONFIG_SYSFS=y -# CONFIG_TMPFS is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set - -# -# Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set @@ -1123,17 +1186,8 @@ CONFIG_SYSFS=y # CONFIG_BFS_FS is not set # CONFIG_EFS_FS is not set # CONFIG_JFFS2_FS is not set -CONFIG_YAFFS_FS=y -CONFIG_YAFFS_YAFFS1=y -# CONFIG_YAFFS_9BYTE_TAGS is not set -# CONFIG_YAFFS_DOES_ECC is not set -CONFIG_YAFFS_YAFFS2=y -CONFIG_YAFFS_AUTO_YAFFS2=y -# 
CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set -# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set -# CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED is not set -CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y # CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_OMFS_FS is not set @@ -1152,7 +1206,6 @@ CONFIG_LOCKD=m CONFIG_LOCKD_V4=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=m -# CONFIG_SUNRPC_REGISTER_V4 is not set # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set CONFIG_SMB_FS=m @@ -1217,18 +1270,19 @@ CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y CONFIG_FRAME_WARN=1024 # CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set # CONFIG_UNUSED_SYMBOLS is not set # CONFIG_DEBUG_FS is not set # CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_SECTION_MISMATCH=y # CONFIG_DEBUG_KERNEL is not set # CONFIG_DEBUG_BUGVERBOSE is not set # CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_RCU_CPU_STALL_DETECTOR is not set - -# -# Tracers -# -# CONFIG_DYNAMIC_PRINTK_DEBUG is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y CONFIG_DEBUG_VERBOSE=y @@ -1245,6 +1299,7 @@ CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0 # CONFIG_EARLY_PRINTK is not set # CONFIG_CPLB_INFO is not set # CONFIG_ACCESS_CHECK is not set +# CONFIG_BFIN_ISRAM_SELF_TEST is not set # # Security options @@ -1253,14 +1308,14 @@ CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=0 CONFIG_SECURITY=y # CONFIG_SECURITYFS is not set # CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set # CONFIG_SECURITY_FILE_CAPABILITIES is not set -CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0 +# CONFIG_SECURITY_TOMOYO is not set CONFIG_CRYPTO=y # # Crypto core or helper # -# CONFIG_CRYPTO_FIPS is not set # CONFIG_CRYPTO_MANAGER is not set # CONFIG_CRYPTO_MANAGER2 is not set # CONFIG_CRYPTO_GF128MUL is not set @@ -1292,11 +1347,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_HMAC is not set # CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set # # Digest # # CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set # CONFIG_CRYPTO_MD4 is not set # CONFIG_CRYPTO_MD5 is not set # CONFIG_CRYPTO_MICHAEL_MIC is not set @@ -1333,6 +1390,7 @@ CONFIG_CRYPTO=y # Compression # # CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set # CONFIG_CRYPTO_LZO is not set # @@ -1340,11 +1398,13 @@ CONFIG_CRYPTO=y # # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set # # Library routines # CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y CONFIG_CRC_CCITT=m # CONFIG_CRC16 is not set # CONFIG_CRC_T10DIF is not set @@ -1356,3 +1416,4 @@ CONFIG_ZLIB_INFLATE=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y -- cgit v1.2.3 From f05ede3a9416b6eff6c997c12fd749044fb4fdac Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Fri, 25 Dec 2009 15:34:08 +0000 Subject: Blackfin: isram: add unlikely to sanity checks Don't want the compiler assuming the rare sanity checks are the norm and optimize for those paths. 
Signed-off-by: Mike Frysinger --- arch/blackfin/mm/isram-driver.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/blackfin/mm/isram-driver.c b/arch/blackfin/mm/isram-driver.c index 84cdc5a1c139..9213e2357888 100644 --- a/arch/blackfin/mm/isram-driver.c +++ b/arch/blackfin/mm/isram-driver.c @@ -62,7 +62,7 @@ static void isram_write(const void *addr, uint64_t data) uint32_t cmd; unsigned long flags; - if (addr >= (void *)(L1_CODE_START + L1_CODE_LENGTH)) + if (unlikely(addr >= (void *)(L1_CODE_START + L1_CODE_LENGTH))) return; cmd = IADDR2DTEST(addr) | 2; /* write */ @@ -93,7 +93,7 @@ static uint64_t isram_read(const void *addr) unsigned long flags; uint64_t ret; - if (addr > (void *)(L1_CODE_START + L1_CODE_LENGTH)) + if (unlikely(addr > (void *)(L1_CODE_START + L1_CODE_LENGTH))) return 0; cmd = IADDR2DTEST(addr) | 0; /* read */ @@ -120,7 +120,7 @@ static bool isram_check_addr(const void *addr, size_t n) { if ((addr >= (void *)L1_CODE_START) && (addr < (void *)(L1_CODE_START + L1_CODE_LENGTH))) { - if ((addr + n) > (void *)(L1_CODE_START + L1_CODE_LENGTH)) { + if (unlikely((addr + n) > (void *)(L1_CODE_START + L1_CODE_LENGTH))) { show_stack(NULL, NULL); pr_err("copy involving %p length (%zu) too long\n", addr, n); } -- cgit v1.2.3 From 69e1d8a61d5aa9e03676dc21fdfb750c5a97bb34 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Fri, 25 Dec 2009 21:06:06 +0000 Subject: Blackfin: unify MMR helpers Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/def_LPBlackfin.h | 56 +++++++----------------------- 1 file changed, 12 insertions(+), 44 deletions(-) diff --git a/arch/blackfin/include/asm/def_LPBlackfin.h b/arch/blackfin/include/asm/def_LPBlackfin.h index e521f8eb260a..f342ff0319df 100644 --- a/arch/blackfin/include/asm/def_LPBlackfin.h +++ b/arch/blackfin/include/asm/def_LPBlackfin.h @@ -25,62 +25,30 @@ # define NOP_PAD_ANOMALY_05000198 #endif -#define bfin_read8(addr) ({ \ - uint32_t __v; \ +#define _bfin_readX(addr, size, asm_size, asm_ext) ({ \ + u32 __v; \ __asm__ __volatile__( \ NOP_PAD_ANOMALY_05000198 \ - "%0 = b[%1] (z);" \ + "%0 = " #asm_size "[%1]" #asm_ext ";" \ : "=d" (__v) \ : "a" (addr) \ ); \ __v; }) - -#define bfin_read16(addr) ({ \ - uint32_t __v; \ - __asm__ __volatile__( \ - NOP_PAD_ANOMALY_05000198 \ - "%0 = w[%1] (z);" \ - : "=d" (__v) \ - : "a" (addr) \ - ); \ - __v; }) - -#define bfin_read32(addr) ({ \ - uint32_t __v; \ - __asm__ __volatile__( \ - NOP_PAD_ANOMALY_05000198 \ - "%0 = [%1];" \ - : "=d" (__v) \ - : "a" (addr) \ - ); \ - __v; }) - -#define bfin_write8(addr, val) \ +#define _bfin_writeX(addr, val, size, asm_size) \ __asm__ __volatile__( \ NOP_PAD_ANOMALY_05000198 \ - "b[%0] = %1;" \ + #asm_size "[%0] = %1;" \ : \ - : "a" (addr), "d" ((uint8_t)(val)) \ + : "a" (addr), "d" ((u##size)(val)) \ : "memory" \ ) -#define bfin_write16(addr, val) \ - __asm__ __volatile__( \ - NOP_PAD_ANOMALY_05000198 \ - "w[%0] = %1;" \ - : \ - : "a" (addr), "d" ((uint16_t)(val)) \ - : "memory" \ - ) - -#define bfin_write32(addr, val) \ - __asm__ __volatile__( \ - NOP_PAD_ANOMALY_05000198 \ - "[%0] = %1;" \ - : \ - : "a" (addr), "d" (val) \ - : "memory" \ - ) +#define bfin_read8(addr) _bfin_readX(addr, 8, b, (z)) +#define bfin_read16(addr) _bfin_readX(addr, 16, w, (z)) +#define bfin_read32(addr) _bfin_readX(addr, 32, , ) +#define bfin_write8(addr, val) _bfin_writeX(addr, val, 8, b) +#define bfin_write16(addr, val) _bfin_writeX(addr, val, 16, w) +#define bfin_write32(addr, val) _bfin_writeX(addr, val, 32, ) #endif /* __ASSEMBLY__ */ -- cgit v1.2.3 
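The unlikely() annotation used in the isram sanity checks above is a hint to the optimizer rather than a functional change: in the kernel it expands to GCC's __builtin_expect(), so the compiler lays out the fast (in-range) path as straight-line code and moves the error path out of the way. A minimal, self-contained sketch of the idea in plain C is shown below; the macro definitions mirror the kernel's, but the address constants and function name are illustrative only, not the real isram driver.

    #include <stdint.h>
    #include <stdio.h>

    /* Rough equivalents of the kernel macros from <linux/compiler.h>. */
    #define likely(x)    __builtin_expect(!!(x), 1)
    #define unlikely(x)  __builtin_expect(!!(x), 0)

    /* Illustrative values; the real ones come from the Blackfin headers. */
    #define L1_CODE_START   0xffa00000UL
    #define L1_CODE_LENGTH  0x8000UL

    static int isram_write_checked(uintptr_t addr)
    {
            /* The check almost never fires, so tell the compiler the
             * branch is unlikely; the common case falls straight through. */
            if (unlikely(addr >= L1_CODE_START + L1_CODE_LENGTH)) {
                    fprintf(stderr, "address %#lx out of range\n",
                            (unsigned long)addr);
                    return -1;
            }
            /* ... perform the actual write here ... */
            return 0;
    }

    int main(void)
    {
            return isram_write_checked(L1_CODE_START + 0x100) ? 1 : 0;
    }

With branch-probability information like this, GCC typically emits the error path as an out-of-line cold block, which is the layout the commit message is after.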
From 0325f25a919ed09d11b16ec8eccf95618dc36601 Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Mon, 28 Dec 2009 07:29:57 +0000 Subject: Blackfin: SMP: add support for IRQ affinity Now that the Blackfin IRQ controller supports this, drivers get the normal functionality of controlling which CPU to bind IRQs to. Signed-off-by: Sonic Zhang Signed-off-by: Mike Frysinger --- arch/blackfin/mach-common/ints-priority.c | 38 +++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index 5202a6076695..a5d243409d23 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c @@ -173,7 +173,12 @@ static void bfin_internal_mask_irq(unsigned int irq) local_irq_restore_hw(flags); } +#ifdef CONFIG_SMP +static void bfin_internal_unmask_irq_affinity(unsigned int irq, + const struct cpumask *affinity) +#else static void bfin_internal_unmask_irq(unsigned int irq) +#endif { unsigned long flags; @@ -186,16 +191,38 @@ static void bfin_internal_unmask_irq(unsigned int irq) local_irq_save_hw(flags); mask_bank = SIC_SYSIRQ(irq) / 32; mask_bit = SIC_SYSIRQ(irq) % 32; - bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) | - (1 << mask_bit)); #ifdef CONFIG_SMP - bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) | - (1 << mask_bit)); + if (cpumask_test_cpu(0, affinity)) +#endif + bfin_write_SIC_IMASK(mask_bank, + bfin_read_SIC_IMASK(mask_bank) | + (1 << mask_bit)); +#ifdef CONFIG_SMP + if (cpumask_test_cpu(1, affinity)) + bfin_write_SICB_IMASK(mask_bank, + bfin_read_SICB_IMASK(mask_bank) | + (1 << mask_bit)); #endif #endif local_irq_restore_hw(flags); } +#ifdef CONFIG_SMP +static void bfin_internal_unmask_irq(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + bfin_internal_unmask_irq_affinity(irq, desc->affinity); +} + +static int bfin_internal_set_affinity(unsigned int irq, const struct cpumask *mask) +{ + bfin_internal_mask_irq(irq); + bfin_internal_unmask_irq_affinity(irq, mask); + + return 0; +} +#endif + #ifdef CONFIG_PM int bfin_internal_set_wake(unsigned int irq, unsigned int state) { @@ -271,6 +298,9 @@ static struct irq_chip bfin_internal_irqchip = { .mask_ack = bfin_internal_mask_irq, .disable = bfin_internal_mask_irq, .enable = bfin_internal_unmask_irq, +#ifdef CONFIG_SMP + .set_affinity = bfin_internal_set_affinity, +#endif #ifdef CONFIG_PM .set_wake = bfin_internal_set_wake, #endif -- cgit v1.2.3 From 682f5dc4ed7cdef1f55e40ee505c4346dfa6fa91 Mon Sep 17 00:00:00 2001 From: Graf Yang Date: Mon, 28 Dec 2009 09:27:27 +0000 Subject: Blackfin: drop cpu_callin_map on SMP systems Common API already provides functions for managing online CPUs. Signed-off-by: Graf Yang Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf561/smp.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c index 0192532e96a2..ec93f3ef8fa3 100644 --- a/arch/blackfin/mach-bf561/smp.c +++ b/arch/blackfin/mach-bf561/smp.c @@ -14,8 +14,6 @@ static DEFINE_SPINLOCK(boot_lock); -static cpumask_t cpu_callin_map; - /* * platform_init_cpus() - Tell the world about how many cores we * have. This is called while setting up the architecture support @@ -72,7 +70,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu) bfin_setup_cpudata(cpu); /* We are done with local CPU inits, unblock the boot CPU. 
*/ - cpu_set(cpu, cpu_callin_map); + set_cpu_online(cpu, true); spin_lock(&boot_lock); spin_unlock(&boot_lock); } @@ -95,14 +93,13 @@ int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle timeout = jiffies + 1 * HZ; while (time_before(jiffies, timeout)) { - if (cpu_isset(cpu, cpu_callin_map)) + if (cpu_online(cpu)) break; udelay(100); barrier(); } - if (cpu_isset(cpu, cpu_callin_map)) { - cpu_set(cpu, cpu_online_map); + if (cpu_online(cpu)) { /* release the lock and let coreb run */ spin_unlock(&boot_lock); return 0; -- cgit v1.2.3 From 0d152c27e336b5fd777da7dd3e814617e7305afd Mon Sep 17 00:00:00 2001 From: Yi Li Date: Mon, 28 Dec 2009 10:21:49 +0000 Subject: Blackfin: SMP: make core timers per-cpu clock events for HRT SMP systems require per-cpu local clock event devices in order to enable HRT support. One a BF561, we can use local core timer for this purpose. Originally, there was one global core-timer clock event device set up for core A. To accomplish this feat, we need to split the gptimer0/core timer logic so that each is a standalone clock event. There is no requirement that we only have one clock event source anyways. Once we have this, we just define per-cpu clock event devices for each local core timer. Signed-off-by: Yi Li Signed-off-by: Mike Frysinger --- arch/blackfin/Kconfig | 26 ++-- arch/blackfin/include/asm/time.h | 6 +- arch/blackfin/kernel/time-ts.c | 197 +++++++++++++++------------- arch/blackfin/mach-bf561/include/mach/smp.h | 2 + arch/blackfin/mach-bf561/smp.c | 18 +++ arch/blackfin/mach-common/ints-priority.c | 3 - arch/blackfin/mach-common/smp.c | 17 +-- 7 files changed, 146 insertions(+), 123 deletions(-) diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index b483639c80b3..0bd26dbca09f 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -236,7 +236,7 @@ endchoice config SMP depends on BF561 - select GENERIC_CLOCKEVENTS + select TICKSOURCE_CORETMR bool "Symmetric multi-processing support" ---help--- This enables support for systems with more than one CPU, @@ -610,23 +610,23 @@ config GENERIC_CLOCKEVENTS bool "Generic clock events" default y -choice - prompt "Kernel Tick Source" +menu "Clock event device" depends on GENERIC_CLOCKEVENTS - default TICKSOURCE_CORETMR - config TICKSOURCE_GPTMR0 - bool "Gptimer0 (SCLK domain)" + bool "GPTimer0" + depends on !SMP select BFIN_GPTIMERS config TICKSOURCE_CORETMR - bool "Core timer (CCLK domain)" - -endchoice + bool "Core timer" + default y +endmenu -config CYCLES_CLOCKSOURCE - bool "Use 'CYCLES' as a clocksource" +menu "Clock souce" depends on GENERIC_CLOCKEVENTS +config CYCLES_CLOCKSOURCE + bool "CYCLES" + default y depends on !BFIN_SCRATCH_REG_CYCLES depends on !SMP help @@ -637,10 +637,10 @@ config CYCLES_CLOCKSOURCE writing the registers will most likely crash the kernel. 
config GPTMR0_CLOCKSOURCE - bool "Use GPTimer0 as a clocksource" + bool "GPTimer0" select BFIN_GPTIMERS - depends on GENERIC_CLOCKEVENTS depends on !TICKSOURCE_GPTMR0 +endmenu config ARCH_USES_GETTIMEOFFSET depends on !GENERIC_CLOCKEVENTS diff --git a/arch/blackfin/include/asm/time.h b/arch/blackfin/include/asm/time.h index 589e937ed1eb..767b938ccf8c 100644 --- a/arch/blackfin/include/asm/time.h +++ b/arch/blackfin/include/asm/time.h @@ -37,5 +37,9 @@ extern unsigned long long __bfin_cycles_off; extern unsigned int __bfin_cycles_mod; #endif -extern void __init setup_core_timer(void); +#if defined(CONFIG_TICKSOURCE_CORETMR) +extern void bfin_coretmr_init(void); +extern void bfin_coretmr_clockevent_init(void); +#endif + #endif diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c index 17c38c5b5b22..a351f97c87a3 100644 --- a/arch/blackfin/kernel/time-ts.c +++ b/arch/blackfin/kernel/time-ts.c @@ -132,7 +132,6 @@ static int __init bfin_cs_gptimer0_init(void) # define bfin_cs_gptimer0_init() #endif - #if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE) /* prefer to use cycles since it has higher rating */ notrace unsigned long long sched_clock(void) @@ -145,47 +144,8 @@ notrace unsigned long long sched_clock(void) } #endif -#ifdef CONFIG_CORE_TIMER_IRQ_L1 -__attribute__((l1_text)) -#endif -irqreturn_t timer_interrupt(int irq, void *dev_id); - -static int bfin_timer_set_next_event(unsigned long, \ - struct clock_event_device *); - -static void bfin_timer_set_mode(enum clock_event_mode, \ - struct clock_event_device *); - -static struct clock_event_device clockevent_bfin = { -#if defined(CONFIG_TICKSOURCE_GPTMR0) - .name = "bfin_gptimer0", - .rating = 300, - .irq = IRQ_TIMER0, -#else - .name = "bfin_core_timer", - .rating = 350, - .irq = IRQ_CORETMR, -#endif - .shift = 32, - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_next_event = bfin_timer_set_next_event, - .set_mode = bfin_timer_set_mode, -}; - -static struct irqaction bfin_timer_irq = { -#if defined(CONFIG_TICKSOURCE_GPTMR0) - .name = "Blackfin GPTimer0", -#else - .name = "Blackfin CoreTimer", -#endif - .flags = IRQF_DISABLED | IRQF_TIMER | \ - IRQF_IRQPOLL | IRQF_PERCPU, - .handler = timer_interrupt, - .dev_id = &clockevent_bfin, -}; - #if defined(CONFIG_TICKSOURCE_GPTMR0) -static int bfin_timer_set_next_event(unsigned long cycles, +static int bfin_gptmr0_set_next_event(unsigned long cycles, struct clock_event_device *evt) { disable_gptimers(TIMER0bit); @@ -196,7 +156,7 @@ static int bfin_timer_set_next_event(unsigned long cycles, return 0; } -static void bfin_timer_set_mode(enum clock_event_mode mode, +static void bfin_gptmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { switch (mode) { @@ -224,25 +184,65 @@ static void bfin_timer_set_mode(enum clock_event_mode mode, } } -static void bfin_timer_ack(void) +static void bfin_gptmr0_ack(void) { set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0); } -static void __init bfin_timer_init(void) +static void __init bfin_gptmr0_init(void) { disable_gptimers(TIMER0bit); } -static unsigned long __init bfin_clockevent_check(void) +#ifdef CONFIG_CORE_TIMER_IRQ_L1 +__attribute__((l1_text)) +#endif +irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id) { - setup_irq(IRQ_TIMER0, &bfin_timer_irq); - return get_sclk(); + struct clock_event_device *evt = dev_id; + smp_mb(); + evt->event_handler(evt); + bfin_gptmr0_ack(); + return IRQ_HANDLED; } -#else /* CONFIG_TICKSOURCE_CORETMR */ +static struct irqaction 
gptmr0_irq = { + .name = "Blackfin GPTimer0", + .flags = IRQF_DISABLED | IRQF_TIMER | \ + IRQF_IRQPOLL | IRQF_PERCPU, + .handler = bfin_gptmr0_interrupt, +}; -static int bfin_timer_set_next_event(unsigned long cycles, +static struct clock_event_device clockevent_gptmr0 = { + .name = "bfin_gptimer0", + .rating = 300, + .irq = IRQ_TIMER0, + .shift = 32, + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_next_event = bfin_gptmr0_set_next_event, + .set_mode = bfin_gptmr0_set_mode, +}; + +static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt) +{ + unsigned long clock_tick; + + clock_tick = get_sclk(); + evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift); + evt->max_delta_ns = clockevent_delta2ns(-1, evt); + evt->min_delta_ns = clockevent_delta2ns(100, evt); + + evt->cpumask = cpumask_of(0); + + clockevents_register_device(evt); +} +#endif /* CONFIG_TICKSOURCE_GPTMR0 */ + +#if defined(CONFIG_TICKSOURCE_CORETMR) +/* per-cpu local core timer */ +static DEFINE_PER_CPU(struct clock_event_device, coretmr_events); + +static int bfin_coretmr_set_next_event(unsigned long cycles, struct clock_event_device *evt) { bfin_write_TCNTL(TMPWR); @@ -253,7 +253,7 @@ static int bfin_timer_set_next_event(unsigned long cycles, return 0; } -static void bfin_timer_set_mode(enum clock_event_mode mode, +static void bfin_coretmr_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { switch (mode) { @@ -285,19 +285,13 @@ static void bfin_timer_set_mode(enum clock_event_mode mode, } } -static void bfin_timer_ack(void) -{ -} - -static void __init bfin_timer_init(void) +void bfin_coretmr_init(void) { /* power up the timer, but don't enable it just yet */ bfin_write_TCNTL(TMPWR); CSYNC(); - /* - * the TSCALE prescaler counter. - */ + /* the TSCALE prescaler counter. 
*/ bfin_write_TSCALE(TIME_SCALE - 1); bfin_write_TPERIOD(0); bfin_write_TCOUNT(0); @@ -305,48 +299,51 @@ static void __init bfin_timer_init(void) CSYNC(); } -static unsigned long __init bfin_clockevent_check(void) -{ - setup_irq(IRQ_CORETMR, &bfin_timer_irq); - return get_cclk() / TIME_SCALE; -} - -void __init setup_core_timer(void) +#ifdef CONFIG_CORE_TIMER_IRQ_L1 +__attribute__((l1_text)) +#endif +irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id) { - bfin_timer_init(); - bfin_timer_set_mode(CLOCK_EVT_MODE_PERIODIC, NULL); -} -#endif /* CONFIG_TICKSOURCE_GPTMR0 */ + int cpu = smp_processor_id(); + struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); -/* - * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "do_timer()" routine every clocktick - */ -irqreturn_t timer_interrupt(int irq, void *dev_id) -{ - struct clock_event_device *evt = dev_id; smp_mb(); evt->event_handler(evt); - bfin_timer_ack(); return IRQ_HANDLED; } -static int __init bfin_clockevent_init(void) -{ - unsigned long timer_clk; - - timer_clk = bfin_clockevent_check(); - - bfin_timer_init(); - - clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift); - clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin); - clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin); - clockevent_bfin.cpumask = cpumask_of(0); - clockevents_register_device(&clockevent_bfin); +static struct irqaction coretmr_irq = { + .name = "Blackfin CoreTimer", + .flags = IRQF_DISABLED | IRQF_TIMER | \ + IRQF_IRQPOLL | IRQF_PERCPU, + .handler = bfin_coretmr_interrupt, +}; - return 0; +void bfin_coretmr_clockevent_init(void) +{ + unsigned long clock_tick; + unsigned int cpu = smp_processor_id(); + struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); + + evt->name = "bfin_core_timer"; + evt->rating = 350; + evt->irq = -1; + evt->shift = 32; + evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; + evt->set_next_event = bfin_coretmr_set_next_event; + evt->set_mode = bfin_coretmr_set_mode; + + clock_tick = get_cclk() / TIME_SCALE; + evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift); + evt->max_delta_ns = clockevent_delta2ns(-1, evt); + evt->min_delta_ns = clockevent_delta2ns(100, evt); + + evt->cpumask = cpumask_of(cpu); + + clockevents_register_device(evt); } +#endif /* CONFIG_TICKSOURCE_CORETMR */ + void __init time_init(void) { @@ -370,5 +367,21 @@ void __init time_init(void) bfin_cs_cycles_init(); bfin_cs_gptimer0_init(); - bfin_clockevent_init(); + +#if defined(CONFIG_TICKSOURCE_CORETMR) + bfin_coretmr_init(); + setup_irq(IRQ_CORETMR, &coretmr_irq); + bfin_coretmr_clockevent_init(); +#endif + +#if defined(CONFIG_TICKSOURCE_GPTMR0) + bfin_gptmr0_init(); + setup_irq(IRQ_TIMER0, &gptmr0_irq); + gptmr0_irq.dev_id = &clockevent_gptmr0; + bfin_gptmr0_clockevent_init(&clockevent_gptmr0); +#endif + +#if !defined(CONFIG_TICKSOURCE_CORETMR) && !defined(CONFIG_TICKSOURCE_GPTMR0) +# error at least one clock event device is required +#endif } diff --git a/arch/blackfin/mach-bf561/include/mach/smp.h b/arch/blackfin/mach-bf561/include/mach/smp.h index 390c7f4ae7b3..2c8c514dd386 100644 --- a/arch/blackfin/mach-bf561/include/mach/smp.h +++ b/arch/blackfin/mach-bf561/include/mach/smp.h @@ -25,4 +25,6 @@ void platform_send_ipi_cpu(unsigned int cpu); void platform_clear_ipi(unsigned int cpu); +void bfin_local_timer_setup(void); + #endif /* !_MACH_BF561_SMP */ diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c index 
ec93f3ef8fa3..90369429ee66 100644 --- a/arch/blackfin/mach-bf561/smp.c +++ b/arch/blackfin/mach-bf561/smp.c @@ -11,6 +11,7 @@ #include #include #include +#include static DEFINE_SPINLOCK(boot_lock); @@ -144,3 +145,20 @@ void platform_clear_ipi(unsigned int cpu) bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (10 + cpu))); SSYNC(); } + +/* + * Setup core B's local core timer. + * In SMP, core timer is used for clock event device. + */ +void __cpuinit bfin_local_timer_setup(void) +{ +#if defined(CONFIG_TICKSOURCE_CORETMR) + bfin_coretmr_init(); + bfin_coretmr_clockevent_init(); + get_irq_chip(IRQ_CORETMR)->unmask(IRQ_CORETMR); +#else + /* Power down the core timer, just to play safe. */ + bfin_write_TCNTL(0); +#endif + +} diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index a5d243409d23..efbdb6a19418 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c @@ -1073,9 +1073,6 @@ int __init init_arch_irq(void) #endif #ifdef CONFIG_SMP -#ifdef CONFIG_TICKSOURCE_GPTMR0 - case IRQ_TIMER0: -#endif #ifdef CONFIG_TICKSOURCE_CORETMR case IRQ_CORETMR: #endif diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index eddb720c718e..b343ab3764a1 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c @@ -365,9 +365,6 @@ int __cpuinit __cpu_up(unsigned int cpu) static void __cpuinit setup_secondary(unsigned int cpu) { -#if !defined(CONFIG_TICKSOURCE_GPTMR0) - struct irq_desc *timer_desc; -#endif unsigned long ilat; bfin_write_IMASK(0); @@ -382,17 +379,6 @@ static void __cpuinit setup_secondary(unsigned int cpu) bfin_irq_flags |= IMASK_IVG15 | IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; - -#if defined(CONFIG_TICKSOURCE_GPTMR0) - /* Power down the core timer, just to play safe. */ - bfin_write_TCNTL(0); - - /* system timer0 has been setup by CoreA. 
*/ -#else - timer_desc = irq_desc + IRQ_CORETMR; - setup_core_timer(); - timer_desc->chip->enable(IRQ_CORETMR); -#endif } void __cpuinit secondary_start_kernel(void) @@ -435,6 +421,9 @@ void __cpuinit secondary_start_kernel(void) platform_secondary_init(cpu); + /* setup local core timer */ + bfin_local_timer_setup(); + local_irq_enable(); /* -- cgit v1.2.3 From 0b39db28b953945232719e7ff6fb802aa8a2be5f Mon Sep 17 00:00:00 2001 From: Graf Yang Date: Mon, 28 Dec 2009 11:13:51 +0000 Subject: Blackfin: SMP: add PM/CPU hotplug support Signed-off-by: Graf Yang Signed-off-by: Mike Frysinger --- arch/blackfin/Kconfig | 6 ++++- arch/blackfin/include/asm/smp.h | 7 +++++ arch/blackfin/mach-bf561/Makefile | 1 + arch/blackfin/mach-bf561/hotplug.c | 32 +++++++++++++++++++++++ arch/blackfin/mach-bf561/secondary.S | 50 ++++++++++++++++++++++++++++++++++-- arch/blackfin/mach-bf561/smp.c | 17 +++++++----- arch/blackfin/mach-common/smp.c | 38 ++++++++++++++++++++++++--- 7 files changed, 138 insertions(+), 13 deletions(-) create mode 100644 arch/blackfin/mach-bf561/hotplug.c diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 0bd26dbca09f..c0d6d966adec 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -250,6 +250,11 @@ config NR_CPUS depends on SMP default 2 if BF561 +config HOTPLUG_CPU + bool "Support for hot-pluggable CPUs" + depends on SMP && HOTPLUG + default y + config IRQ_PER_CPU bool depends on SMP @@ -1130,7 +1135,6 @@ source "fs/Kconfig.binfmt" endmenu menu "Power management options" - depends on !SMP source "kernel/power/Kconfig" diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h index 6a0fe94b84a6..29fb88219470 100644 --- a/arch/blackfin/include/asm/smp.h +++ b/arch/blackfin/include/asm/smp.h @@ -25,5 +25,12 @@ struct corelock_slot { void smp_icache_flush_range_others(unsigned long start, unsigned long end); +#ifdef CONFIG_HOTPLUG_CPU +void coreb_sleep(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2); +void cpu_die(void); +void platform_cpu_die(void); +int __cpu_disable(void); +int __cpu_die(unsigned int cpu); +#endif #endif /* !__ASM_BLACKFIN_SMP_H */ diff --git a/arch/blackfin/mach-bf561/Makefile b/arch/blackfin/mach-bf561/Makefile index 59e18afe28c6..b34029718318 100644 --- a/arch/blackfin/mach-bf561/Makefile +++ b/arch/blackfin/mach-bf561/Makefile @@ -6,3 +6,4 @@ obj-y := ints-priority.o dma.o obj-$(CONFIG_BF561_COREB) += coreb.o obj-$(CONFIG_SMP) += smp.o secondary.o atomic.o +obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o diff --git a/arch/blackfin/mach-bf561/hotplug.c b/arch/blackfin/mach-bf561/hotplug.c new file mode 100644 index 000000000000..c95169b612dc --- /dev/null +++ b/arch/blackfin/mach-bf561/hotplug.c @@ -0,0 +1,32 @@ +/* + * Copyright 2007-2009 Analog Devices Inc. + * Graff Yang + * + * Licensed under the GPL-2 or later. 
+ */ + +#include +#include +#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1)) + +int hotplug_coreb; + +void platform_cpu_die(void) +{ + unsigned long iwr[2] = {0, 0}; + unsigned long bank = SIC_SYSIRQ(IRQ_SUPPLE_0) / 32; + unsigned long bit = 1 << (SIC_SYSIRQ(IRQ_SUPPLE_0) % 32); + + hotplug_coreb = 1; + + iwr[bank] = bit; + + /* disable core timer */ + bfin_write_TCNTL(0); + + /* clear ipi interrupt IRQ_SUPPLE_0 */ + bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (10 + 1))); + SSYNC(); + + coreb_sleep(iwr[0], iwr[1], 0); +} diff --git a/arch/blackfin/mach-bf561/secondary.S b/arch/blackfin/mach-bf561/secondary.S index 8e6050369c06..4624eebbf9c4 100644 --- a/arch/blackfin/mach-bf561/secondary.S +++ b/arch/blackfin/mach-bf561/secondary.S @@ -11,6 +11,7 @@ #include #include #include +#include __INIT @@ -62,6 +63,8 @@ ENTRY(_coreb_trampoline_start) M2 = r0; M3 = r0; + trace_buffer_init(p0,r0); + /* Turn off the icache */ p0.l = LO(IMEM_CONTROL); p0.h = HI(IMEM_CONTROL); @@ -159,6 +162,41 @@ ENTRY(_coreb_trampoline_start) ENDPROC(_coreb_trampoline_start) ENTRY(_coreb_trampoline_end) +.section ".text" +ENTRY(_set_sicb_iwr) + P0.H = hi(SICB_IWR0); + P0.L = lo(SICB_IWR0); + P1.H = hi(SICB_IWR1); + P1.L = lo(SICB_IWR1); + [P0] = R0; + [P1] = R1; + SSYNC; + RTS; +ENDPROC(_set_sicb_iwr) + +ENTRY(_coreb_sleep) + sp.l = lo(INITIAL_STACK); + sp.h = hi(INITIAL_STACK); + fp = sp; + usp = sp; + + call _set_sicb_iwr; + + CLI R2; + SSYNC; + IDLE; + STI R2; + + R0 = IWR_DISABLE_ALL; + R1 = IWR_DISABLE_ALL; + call _set_sicb_iwr; + + p0.h = hi(COREB_L1_CODE_START); + p0.l = lo(COREB_L1_CODE_START); + jump (p0); +ENDPROC(_coreb_sleep) + +__CPUINIT ENTRY(_coreb_start) [--sp] = reti; @@ -176,12 +214,20 @@ ENTRY(_coreb_start) sp = [p0]; usp = sp; fp = sp; +#ifdef CONFIG_HOTPLUG_CPU + p0.l = _hotplug_coreb; + p0.h = _hotplug_coreb; + r0 = [p0]; + cc = BITTST(r0, 0); + if cc jump 3f; +#endif sp += -12; call _init_pda sp += 12; +#ifdef CONFIG_HOTPLUG_CPU +3: +#endif call _secondary_start_kernel; .L_exit: jump.s .L_exit; ENDPROC(_coreb_start) - -__FINIT diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c index 90369429ee66..3b9a4bf7dacc 100644 --- a/arch/blackfin/mach-bf561/smp.c +++ b/arch/blackfin/mach-bf561/smp.c @@ -65,6 +65,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu) bfin_write_SICB_IAR5(bfin_read_SICA_IAR5()); bfin_write_SICB_IAR6(bfin_read_SICA_IAR6()); bfin_write_SICB_IAR7(bfin_read_SICA_IAR7()); + bfin_write_SICB_IWR0(IWR_DISABLE_ALL); + bfin_write_SICB_IWR1(IWR_DISABLE_ALL); SSYNC(); /* Store CPU-private information to the cpu_data array. */ @@ -80,17 +82,18 @@ int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle { unsigned long timeout; - /* CoreB already running?! */ - BUG_ON((bfin_read_SICA_SYSCR() & COREB_SRAM_INIT) == 0); - printk(KERN_INFO "Booting Core B.\n"); spin_lock(&boot_lock); - /* Kick CoreB, which should start execution from CORE_SRAM_BASE. */ - SSYNC(); - bfin_write_SICA_SYSCR(bfin_read_SICA_SYSCR() & ~COREB_SRAM_INIT); - SSYNC(); + if ((bfin_read_SICA_SYSCR() & COREB_SRAM_INIT) == 0) { + /* CoreB already running, sending ipi to wakeup it */ + platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0); + } else { + /* Kick CoreB, which should start execution from CORE_SRAM_BASE. 
*/ + bfin_write_SICA_SYSCR(bfin_read_SICA_SYSCR() & ~COREB_SRAM_INIT); + SSYNC(); + } timeout = jiffies + 1 * HZ; while (time_before(jiffies, timeout)) { diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index b343ab3764a1..efc47ffd066d 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c @@ -344,8 +344,11 @@ void smp_send_stop(void) int __cpuinit __cpu_up(unsigned int cpu) { - struct task_struct *idle; int ret; + static struct task_struct *idle; + + if (idle) + free_task(idle); idle = fork_idle(cpu); if (IS_ERR(idle)) { @@ -354,7 +357,6 @@ int __cpuinit __cpu_up(unsigned int cpu) } secondary_stack = task_stack_page(idle) + THREAD_SIZE; - smp_wmb(); ret = platform_boot_secondary(cpu, idle); @@ -413,7 +415,6 @@ void __cpuinit secondary_start_kernel(void) atomic_inc(&mm->mm_users); atomic_inc(&mm->mm_count); current->active_mm = mm; - BUG_ON(current->mm); /* Can't be, but better be safe than sorry. */ preempt_disable(); @@ -495,3 +496,34 @@ void resync_core_dcache(void) } EXPORT_SYMBOL(resync_core_dcache); #endif + +#ifdef CONFIG_HOTPLUG_CPU +int __cpuexit __cpu_disable(void) +{ + unsigned int cpu = smp_processor_id(); + + if (cpu == 0) + return -EPERM; + + set_cpu_online(cpu, false); + return 0; +} + +static DECLARE_COMPLETION(cpu_killed); + +int __cpuexit __cpu_die(unsigned int cpu) +{ + return wait_for_completion_timeout(&cpu_killed, 5000); +} + +void cpu_die(void) +{ + complete(&cpu_killed); + + atomic_dec(&init_mm.mm_users); + atomic_dec(&init_mm.mm_count); + + local_irq_disable(); + platform_cpu_die(); +} +#endif -- cgit v1.2.3 From 54536c5c6a807d0db7252c3b33638b88f34650ff Mon Sep 17 00:00:00 2001 From: Yi Li Date: Wed, 30 Dec 2009 04:04:07 +0000 Subject: Blackfin: simplify SMP handling in SRAM code There is no need to use {get,put}_cpu() when we already have a spinlock to protect against multiple processors running simultaneously. 
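To illustrate the simplification, a minimal before/after sketch in C (not taken verbatim from sram-alloc.c; "lock" and "do_alloc" are placeholder names for the per-CPU lock and allocator touched below):

	/* before: get_cpu() disables preemption on top of the irq-disabling lock */
	cpu = get_cpu();
	spin_lock_irqsave(&per_cpu(lock, cpu), flags);
	addr = do_alloc(size);
	spin_unlock_irqrestore(&per_cpu(lock, cpu), flags);
	put_cpu();

	/* after: the spinlock alone already serializes access to the per-CPU
	 * pool, so a plain smp_processor_id() is enough to pick the lock */
	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(lock, cpu), flags);
	addr = do_alloc(size);
	spin_unlock_irqrestore(&per_cpu(lock, cpu), flags);
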
Signed-off-by: Yi Li Signed-off-by: Mike Frysinger --- arch/blackfin/mm/sram-alloc.c | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c index f068c11ea98f..5732da25ee2d 100644 --- a/arch/blackfin/mm/sram-alloc.c +++ b/arch/blackfin/mm/sram-alloc.c @@ -402,7 +402,7 @@ void *l1_data_A_sram_alloc(size_t size) void *addr; unsigned int cpu; - cpu = get_cpu(); + cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); @@ -411,7 +411,6 @@ void *l1_data_A_sram_alloc(size_t size) /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); - put_cpu(); pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n", (long unsigned int)addr, size); @@ -430,7 +429,7 @@ int l1_data_A_sram_free(const void *addr) int ret; unsigned int cpu; - cpu = get_cpu(); + cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); @@ -439,7 +438,6 @@ int l1_data_A_sram_free(const void *addr) /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); - put_cpu(); return ret; #else @@ -455,7 +453,7 @@ void *l1_data_B_sram_alloc(size_t size) void *addr; unsigned int cpu; - cpu = get_cpu(); + cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); @@ -464,7 +462,6 @@ void *l1_data_B_sram_alloc(size_t size) /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); - put_cpu(); pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n", (long unsigned int)addr, size); @@ -483,7 +480,7 @@ int l1_data_B_sram_free(const void *addr) int ret; unsigned int cpu; - cpu = get_cpu(); + cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); @@ -492,7 +489,6 @@ int l1_data_B_sram_free(const void *addr) /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); - put_cpu(); return ret; #else @@ -540,7 +536,7 @@ void *l1_inst_sram_alloc(size_t size) void *addr; unsigned int cpu; - cpu = get_cpu(); + cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags); @@ -549,7 +545,6 @@ void *l1_inst_sram_alloc(size_t size) /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags); - put_cpu(); pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n", (long unsigned int)addr, size); @@ -568,7 +563,7 @@ int l1_inst_sram_free(const void *addr) int ret; unsigned int cpu; - cpu = get_cpu(); + cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags); @@ -577,7 +572,6 @@ int l1_inst_sram_free(const void *addr) /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags); - put_cpu(); return ret; #else @@ -593,7 +587,7 @@ void *l1sram_alloc(size_t size) void *addr; unsigned int cpu; - cpu = get_cpu(); + cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); @@ -602,7 +596,6 @@ void *l1sram_alloc(size_t size) /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); - put_cpu(); return addr; } @@ -614,7 +607,7 @@ void *l1sram_alloc_max(size_t *psize) void *addr; unsigned int cpu; - cpu = get_cpu(); + cpu = smp_processor_id(); /* add mutex 
operation */ spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); @@ -623,7 +616,6 @@ void *l1sram_alloc_max(size_t *psize) /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); - put_cpu(); return addr; } @@ -635,7 +627,7 @@ int l1sram_free(const void *addr) int ret; unsigned int cpu; - cpu = get_cpu(); + cpu = smp_processor_id(); /* add mutex operation */ spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); @@ -644,7 +636,6 @@ int l1sram_free(const void *addr) /* add mutex operation */ spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); - put_cpu(); return ret; } -- cgit v1.2.3 From cb191718fcca887fb4d5e3de833546ab12b433f9 Mon Sep 17 00:00:00 2001 From: Yi Li Date: Wed, 30 Dec 2009 07:12:50 +0000 Subject: Blackfin: try to simplify interrupt ifdef ugliness Signed-off-by: Yi Li Signed-off-by: Mike Frysinger --- arch/blackfin/mach-common/ints-priority.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index efbdb6a19418..3589fe8b86ba 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c @@ -1062,7 +1062,6 @@ int __init init_arch_irq(void) #elif defined(CONFIG_BF538) || defined(CONFIG_BF539) case IRQ_PORTF_INTA: #endif - set_irq_chained_handler(irq, bfin_demux_gpio_irq); break; @@ -1073,24 +1072,30 @@ int __init init_arch_irq(void) #endif #ifdef CONFIG_SMP -#ifdef CONFIG_TICKSOURCE_CORETMR - case IRQ_CORETMR: -#endif case IRQ_SUPPLE_0: case IRQ_SUPPLE_1: set_irq_handler(irq, handle_percpu_irq); break; #endif -#ifdef CONFIG_IPIPE -#ifndef CONFIG_TICKSOURCE_CORETMR - case IRQ_TIMER0: +#ifdef CONFIG_TICKSOURCE_CORETMR + case IRQ_CORETMR: +# ifdef CONFIG_SMP + set_irq_handler(irq, handle_percpu_irq); + break; +# else set_irq_handler(irq, handle_simple_irq); break; +# endif #endif - case IRQ_CORETMR: + +#ifdef CONFIG_TICKSOURCE_GPTMR0 + case IRQ_TIMER0: set_irq_handler(irq, handle_simple_irq); break; +#endif + +#ifdef CONFIG_IPIPE default: set_irq_handler(irq, handle_level_irq); break; -- cgit v1.2.3 From 0afc272cc6044b7a4f1318eadbfb10c4be654441 Mon Sep 17 00:00:00 2001 From: Jie Zhang Date: Tue, 5 Jan 2010 04:22:33 +0000 Subject: Blackfin: fix relocation errors with large initramfs images Since we are now discarding .exit.text at runtime instead of link time, we need to place all .text sections ahead of the .data sections. Otherwise, a really large attached initramfs may cause link errors as it pushes the PC relative relocations behind the limits of the Blackfin ISA (~16meg). The instructions in the .exit.text are unable to call back into the .text sections leading to a link failure. Signed-off-by: Jie Zhang Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/vmlinux.lds.S | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index 66799e763dc9..be4b1bbb3918 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -104,18 +104,21 @@ SECTIONS ___init_begin = .; INIT_TEXT_SECTION(PAGE_SIZE) - . = ALIGN(16); - INIT_DATA_SECTION(16) - PERCPU(4) - /* we have to discard exit text and such at runtime, not link time, to + /* We have to discard exit text and such at runtime, not link time, to * handle embedded cross-section references (alt instructions, bug - * table, eh_frame, etc...) + * table, eh_frame, etc...). 
We need all of our .text up front and + * .data after it for PCREL call issues. */ .exit.text : { EXIT_TEXT } + + . = ALIGN(16); + INIT_DATA_SECTION(16) + PERCPU(4) + .exit.data : { EXIT_DATA -- cgit v1.2.3 From 6feda3a6530204ad73a2dc1713c051462a1d9c49 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Tue, 5 Jan 2010 07:05:50 +0000 Subject: Blackfin: replace harcoded define with proper THREAD_SIZE macro Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/setup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c index 8dc7ee1ef336..a0bc7d3e1bff 100644 --- a/arch/blackfin/kernel/setup.c +++ b/arch/blackfin/kernel/setup.c @@ -635,7 +635,7 @@ static __init void memory_setup(void) __bss_start, __bss_stop, _sdata, _edata, (void *)&init_thread_union, - (void *)((int)(&init_thread_union) + 0x2000), + (void *)((int)(&init_thread_union) + THREAD_SIZE), __init_begin, __init_end, (void *)_ramstart, (void *)memory_end #ifdef CONFIG_MTD_UCLINUX -- cgit v1.2.3 From 8916a1499d958fcb9086a2c49a5fa3000689bb81 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Tue, 5 Jan 2010 07:16:32 +0000 Subject: Blackfin: fix the section name of init_thread_union Use the common attribute rather than setting the section name directly. The common linker script defines expect the newer naming. Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/init_task.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c index 118c5b9dedac..d3970e8acd1a 100644 --- a/arch/blackfin/kernel/init_task.c +++ b/arch/blackfin/kernel/init_task.c @@ -28,5 +28,5 @@ EXPORT_SYMBOL(init_task); * "init_task" linker map entry. */ union thread_union init_thread_union - __attribute__ ((__section__(".init_task.data"))) = { + __init_task_data = { INIT_THREAD_INFO(init_task)}; -- cgit v1.2.3 From aad16f32284030907b4f105e92e5fb534fd272bc Mon Sep 17 00:00:00 2001 From: Barry Song Date: Tue, 5 Jan 2010 07:25:24 +0000 Subject: Blackfin: fix initial stack pointer setup During very early init, the stack pointer is given a slightly incorrect value (&init_thread_union). The value is later adjusted to the right one during early init (&init_thread_union + THREAD_SIZE), but it is used a few times in between. While the few functions used don't actually put things onto the stack (due to optimization), it's best if we simply use the right value from the start. 
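For reference, an illustrative sketch (not part of the patch) of the same value expressed in C via the generic task_stack_page() helper; "tsk" is a placeholder task pointer:

	/* the top of a task's stack as generic code computes it; for the init
	 * task, task_stack_page() is &init_thread_union, so the very first SP
	 * should be &init_thread_union + THREAD_SIZE (the stack grows downward) */
	unsigned long stack_top = (unsigned long)task_stack_page(tsk) + THREAD_SIZE;
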
Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/mach-common/head.S | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S index cab0a0031eee..cf037782c8ad 100644 --- a/arch/blackfin/mach-common/head.S +++ b/arch/blackfin/mach-common/head.S @@ -144,8 +144,8 @@ ENTRY(__start) #endif /* Initialize stack pointer */ - sp.l = _init_thread_union; - sp.h = _init_thread_union; + sp.l = _init_thread_union + THREAD_SIZE; + sp.h = _init_thread_union + THREAD_SIZE; fp = sp; usp = sp; @@ -257,12 +257,7 @@ ENTRY(_real_start) R0 = R7; call _cmdline_init; - /* Load the current thread pointer and stack */ - p1 = THREAD_SIZE + 4 (z); /* +4 is for reti loading */ - sp = sp + p1; - usp = sp; - fp = sp; - sp += -12; + sp += -12 + 4; /* +4 is for reti loading above */ call _init_pda sp += 12; jump.l _start_kernel; -- cgit v1.2.3 From d86bfb1600db38e8387beee0aaab4263cfd728a2 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Thu, 7 Jan 2010 04:11:17 +0000 Subject: Blackfin: initial XIP support Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/Kconfig | 16 ++++++--- arch/blackfin/Makefile | 6 +++- arch/blackfin/boot/Makefile | 23 +++++++++--- arch/blackfin/include/asm/context.S | 10 ++++++ arch/blackfin/include/asm/sections.h | 3 ++ arch/blackfin/kernel/cplb-mpu/cplbinit.c | 9 +++++ arch/blackfin/kernel/cplb-nompu/cplbinit.c | 9 +++++ arch/blackfin/kernel/entry.S | 8 ++--- arch/blackfin/kernel/setup.c | 16 +++++++-- arch/blackfin/kernel/vmlinux.lds.S | 57 ++++++++++++++++++++++++++++++ arch/blackfin/mach-common/entry.S | 17 ++++----- arch/blackfin/mach-common/head.S | 5 +++ arch/blackfin/mach-common/interrupt.S | 10 +++--- 13 files changed, 160 insertions(+), 29 deletions(-) diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index c0d6d966adec..f46db59eac8a 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -26,9 +26,9 @@ config BLACKFIN select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER select HAVE_IDE - select HAVE_KERNEL_GZIP - select HAVE_KERNEL_BZIP2 - select HAVE_KERNEL_LZMA + select HAVE_KERNEL_GZIP if RAMKERNEL + select HAVE_KERNEL_BZIP2 if RAMKERNEL + select HAVE_KERNEL_LZMA if RAMKERNEL select HAVE_OPROFILE select ARCH_WANT_OPTIONAL_GPIOLIB @@ -407,10 +407,18 @@ config BOOT_LOAD config ROM_BASE hex "Kernel ROM Base" depends on ROMKERNEL - default "0x20040000" + default "0x20040040" range 0x20000000 0x20400000 if !(BF54x || BF561) range 0x20000000 0x30000000 if (BF54x || BF561) help + Make sure your ROM base does not include any file-header + information that is prepended to the kernel. + + For example, the bootable U-Boot format (created with + mkimage) has a 64 byte header (0x40). So while the image + you write to flash might start at say 0x20080000, you have + to add 0x40 to get the kernel's ROM base as it will come + after the header. 
comment "Clock/PLL Setup" diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile index d4c7177e7656..ba84206d0554 100644 --- a/arch/blackfin/Makefile +++ b/arch/blackfin/Makefile @@ -14,6 +14,9 @@ OBJCOPYFLAGS := -O binary -R .note -R .comment -S GZFLAGS := -9 KBUILD_CFLAGS += $(call cc-option,-mno-fdpic) +ifeq ($(CONFIG_ROMKERNEL),y) +KBUILD_CFLAGS += -mlong-calls +endif KBUILD_AFLAGS += $(call cc-option,-mno-fdpic) CFLAGS_MODULE += -mlong-calls LDFLAGS_MODULE += -m elf32bfin @@ -138,7 +141,7 @@ archclean: INSTALL_PATH ?= /tftpboot boot := arch/$(ARCH)/boot -BOOT_TARGETS = vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma +BOOT_TARGETS = vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.xip PHONY += $(BOOT_TARGETS) install KBUILD_IMAGE := $(boot)/vmImage @@ -156,6 +159,7 @@ define archhelp echo ' vmImage.bz2 - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.bz2)' echo '* vmImage.gz - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.gz)' echo ' vmImage.lzma - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.lzma)' + echo ' vmImage.xip - XIP Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.xip)' echo ' install - Install kernel using' echo ' (your) ~/bin/$(INSTALLKERNEL) or' echo ' (distribution) PATH: $(INSTALLKERNEL) or' diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile index e9c48c6f8c1f..d1b3d6051fdf 100644 --- a/arch/blackfin/boot/Makefile +++ b/arch/blackfin/boot/Makefile @@ -8,14 +8,18 @@ MKIMAGE := $(srctree)/scripts/mkuboot.sh -targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma -extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma +targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.xip +extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xip + +UIMAGE_OPTS-y := +UIMAGE_OPTS-$(CONFIG_RAMKERNEL) += -a $(CONFIG_BOOT_LOAD) +UIMAGE_OPTS-$(CONFIG_ROMKERNEL) += -a $(CONFIG_ROM_BASE) -x quiet_cmd_uimage = UIMAGE $@ cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \ - -C $(2) -n '$(MACHINE)-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \ + -C $(2) -n '$(MACHINE)-$(KERNELRELEASE)' \ -e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \ - -d $< $@ + $(UIMAGE_OPTS-y) -d $< $@ $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) @@ -29,6 +33,12 @@ $(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE $(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE $(call if_changed,lzma) +# The mkimage tool wants 64bytes prepended to the image +quiet_cmd_mk_bin_xip = BIN $@ + cmd_mk_bin_xip = ( printf '%64s' | tr ' ' '\377' ; cat $< ) > $@ +$(obj)/vmlinux.bin.xip: $(obj)/vmlinux.bin FORCE + $(call if_changed,mk_bin_xip) + $(obj)/vmImage.bin: $(obj)/vmlinux.bin $(call if_changed,uimage,none) @@ -41,10 +51,15 @@ $(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz $(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma $(call if_changed,uimage,lzma) +$(obj)/vmImage.xip: $(obj)/vmlinux.bin.xip + $(call if_changed,uimage,none) + suffix-y := bin suffix-$(CONFIG_KERNEL_GZIP) := gz suffix-$(CONFIG_KERNEL_BZIP2) := bz2 suffix-$(CONFIG_KERNEL_LZMA) := lzma +suffix-$(CONFIG_ROMKERNEL) := xip + $(obj)/vmImage: $(obj)/vmImage.$(suffix-y) @ln -sf $(notdir $<) $@ diff --git a/arch/blackfin/include/asm/context.S b/arch/blackfin/include/asm/context.S index 5dffaf582a22..fada8e974a73 100644 --- a/arch/blackfin/include/asm/context.S +++ b/arch/blackfin/include/asm/context.S @@ -374,3 +374,13 @@ (R7:0, P5:0) = [SP++]; .endm + +.macro pseudo_long_call 
func:req, scratch:req +#ifdef CONFIG_ROMKERNEL + \scratch\().l = \func; + \scratch\().h = \func; + call (\scratch); +#else + call \func; +#endif +.endm diff --git a/arch/blackfin/include/asm/sections.h b/arch/blackfin/include/asm/sections.h index 42f6c53c59c6..14a3e66d9167 100644 --- a/arch/blackfin/include/asm/sections.h +++ b/arch/blackfin/include/asm/sections.h @@ -21,6 +21,9 @@ extern unsigned long memory_start, memory_end, physical_mem_end; extern char _stext_l1[], _etext_l1[], _text_l1_lma[], __weak _text_l1_len[]; extern char _sdata_l1[], _edata_l1[], _sbss_l1[], _ebss_l1[], _data_l1_lma[], __weak _data_l1_len[]; +#ifdef CONFIG_ROMKERNEL +extern char _data_lma[], _data_len[], _sinitdata[], _einitdata[], _init_data_lma[], _init_data_len[]; +#endif extern char _sdata_b_l1[], _edata_b_l1[], _sbss_b_l1[], _ebss_b_l1[], _data_b_l1_lma[], __weak _data_b_l1_len[]; extern char _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c index 8d42b9e50dfa..30fd6417f069 100644 --- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c +++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c @@ -64,6 +64,15 @@ void __init generate_cplb_tables_cpu(unsigned int cpu) icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0); } +#ifdef CONFIG_ROMKERNEL + /* Cover kernel XIP flash area */ + addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1); + dcplb_tbl[cpu][i_d].addr = addr; + dcplb_tbl[cpu][i_d++].data = d_data | CPLB_USER_RD; + icplb_tbl[cpu][i_i].addr = addr; + icplb_tbl[cpu][i_i++].data = i_data | CPLB_USER_RD; +#endif + /* Cover L1 memory. One 4M area for code and data each is enough. */ #if L1_DATA_A_LENGTH > 0 || L1_DATA_B_LENGTH > 0 dcplb_tbl[cpu][i_d].addr = get_l1_data_a_start_cpu(cpu); diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c index 282a7919821b..bfe75af4e8bd 100644 --- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c +++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c @@ -56,6 +56,15 @@ void __init generate_cplb_tables_cpu(unsigned int cpu) i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB; } +#ifdef CONFIG_ROMKERNEL + /* Cover kernel XIP flash area */ + addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1); + d_tbl[i_d].addr = addr; + d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB; + i_tbl[i_i].addr = addr; + i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB; +#endif + /* Cover L1 memory. One 4M area for code and data each is enough. 
*/ if (cpu == 0) { if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) { diff --git a/arch/blackfin/kernel/entry.S b/arch/blackfin/kernel/entry.S index f27dc2292e1b..686478f5f66b 100644 --- a/arch/blackfin/kernel/entry.S +++ b/arch/blackfin/kernel/entry.S @@ -44,7 +44,7 @@ ENTRY(_ret_from_fork) sti r4; #endif /* CONFIG_IPIPE */ SP += -12; - call _schedule_tail; + pseudo_long_call _schedule_tail, p5; SP += 12; r0 = [sp + PT_IPEND]; cc = bittst(r0,1); @@ -79,7 +79,7 @@ ENTRY(_sys_vfork) r0 += 24; [--sp] = rets; SP += -12; - call _bfin_vfork; + pseudo_long_call _bfin_vfork, p2; SP += 12; rets = [sp++]; rts; @@ -90,7 +90,7 @@ ENTRY(_sys_clone) r0 += 24; [--sp] = rets; SP += -12; - call _bfin_clone; + pseudo_long_call _bfin_clone, p2; SP += 12; rets = [sp++]; rts; @@ -101,7 +101,7 @@ ENTRY(_sys_rt_sigreturn) r0 += 24; [--sp] = rets; SP += -12; - call _do_rt_sigreturn; + pseudo_long_call _do_rt_sigreturn, p2; SP += 12; rets = [sp++]; rts; diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c index a0bc7d3e1bff..b54ba45db5f1 100644 --- a/arch/blackfin/kernel/setup.c +++ b/arch/blackfin/kernel/setup.c @@ -220,6 +220,16 @@ void __init bfin_relocate_l1_mem(void) memcpy(_stext_l2, _l2_lma, l2_len); } +#ifdef CONFIG_ROMKERNEL +void __init bfin_relocate_xip_data(void) +{ + early_shadow_stamp(); + + memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info)); + memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len); +} +#endif + /* add_memory_region to memmap */ static void __init add_memory_region(unsigned long long start, unsigned long long size, int type) @@ -504,7 +514,7 @@ static __init void memory_setup(void) #endif unsigned long max_mem; - _rambase = (unsigned long)_stext; + _rambase = CONFIG_BOOT_LOAD; _ramstart = (unsigned long)_end; if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) { @@ -1261,8 +1271,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n", - ((int)memory_end - (int)_stext) >> 10, - _stext, + ((int)memory_end - (int)_rambase) >> 10, + (void *)_rambase, (void *)memory_end); seq_printf(m, "\n"); diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index be4b1bbb3918..984c78172397 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -15,7 +15,12 @@ _jiffies = _jiffies_64; SECTIONS { +#ifdef CONFIG_RAMKERNEL . = CONFIG_BOOT_LOAD; +#else + . = CONFIG_ROM_BASE; +#endif + /* Neither the text, ro_data or bss section need to be aligned * So pack them back to back */ @@ -31,6 +36,12 @@ SECTIONS LOCK_TEXT IRQENTRY_TEXT KPROBES_TEXT +#ifdef CONFIG_ROMKERNEL + __sinittext = .; + INIT_TEXT + __einittext = .; + EXIT_TEXT +#endif *(.text.*) *(.fixup) @@ -50,8 +61,14 @@ SECTIONS /* Just in case the first read only is a 32-bit access */ RO_DATA(4) + __rodata_end = .; +#ifdef CONFIG_ROMKERNEL + . = CONFIG_BOOT_LOAD; + .bss : AT(__rodata_end) +#else .bss : +#endif { . 
= ALIGN(4); ___bss_start = .; @@ -67,7 +84,11 @@ SECTIONS ___bss_stop = .; } +#if defined(CONFIG_ROMKERNEL) + .data : AT(LOADADDR(.bss) + SIZEOF(.bss)) +#else .data : +#endif { __sdata = .; /* This gets done first, so the glob doesn't suck it in */ @@ -94,6 +115,8 @@ SECTIONS __edata = .; } + __data_lma = LOADADDR(.data); + __data_len = SIZEOF(.data); /* The init section should be last, so when we free it, it goes into * the general memory pool, and (hopefully) will decrease fragmentation @@ -103,6 +126,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); ___init_begin = .; +#ifdef CONFIG_RAMKERNEL INIT_TEXT_SECTION(PAGE_SIZE) /* We have to discard exit text and such at runtime, not link time, to @@ -125,6 +149,35 @@ SECTIONS } .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data)) +#else + .init.data : AT(__data_lma + __data_len) + { + __sinitdata = .; + INIT_DATA + INIT_SETUP(16) + INIT_CALLS + CON_INITCALL + SECURITY_INITCALL + INIT_RAM_FS + + . = ALIGN(4); + ___per_cpu_load = .; + ___per_cpu_start = .; + *(.data.percpu.first) + *(.data.percpu.page_aligned) + *(.data.percpu) + *(.data.percpu.shared_aligned) + ___per_cpu_end = .; + + EXIT_DATA + __einitdata = .; + } + __init_data_lma = LOADADDR(.init.data); + __init_data_len = SIZEOF(.init.data); + __init_data_end = .; + + .text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len) +#endif { . = ALIGN(4); __stext_l1 = .; @@ -205,7 +258,11 @@ SECTIONS /* Force trailing alignment of our init section so that when we * free our init memory, we don't leave behind a partial page. */ +#ifdef CONFIG_RAMKERNEL . = __l2_lma + __l2_len; +#else + . = __init_data_end; +#endif . = ALIGN(PAGE_SIZE); ___init_end = .; diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index 0df7ef200f9d..ccfa7c490ff9 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S @@ -405,7 +405,7 @@ ENTRY(_double_fault) r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */ SP += -12; - call _double_fault_c; + pseudo_long_call _double_fault_c, p5; SP += 12; .L_double_fault_panic: JUMP .L_double_fault_panic @@ -447,7 +447,7 @@ ENTRY(_exception_to_level5) r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */ SP += -12; - call _trap_c; + pseudo_long_call _trap_c, p4; SP += 12; /* If interrupts were off during the exception (IPEND[4] = 1), turn them off @@ -551,7 +551,7 @@ ENTRY(_kernel_execve) p0 = sp; sp += -16; [sp + 12] = p0; - call _do_execve; + pseudo_long_call _do_execve, p5; SP += 16; cc = r0 == 0; if ! cc jump .Lexecve_failed; @@ -704,7 +704,7 @@ ENTRY(_system_call) sp += 4; SP += -12; - call _schedule; + pseudo_long_call _schedule, p4; SP += 12; jump .Lresume_userspace_1; @@ -723,7 +723,7 @@ ENTRY(_system_call) r0 = sp; SP += -12; - call _do_notify_resume; + pseudo_long_call _do_notify_resume, p5; SP += 12; .Lsyscall_really_exit: @@ -736,7 +736,7 @@ ENDPROC(_system_call) * this symbol need not be global anyways, so ... 
*/ _sys_trace: - call _syscall_trace; + pseudo_long_call _syscall_trace, p5; /* Execute the appropriate system call */ @@ -760,7 +760,7 @@ _sys_trace: SP += 24; [sp + PT_R0] = r0; - call _syscall_trace; + pseudo_long_call _syscall_trace, p5; jump .Lresume_userspace; ENDPROC(_sys_trace) @@ -1007,7 +1007,8 @@ ENTRY(_schedule_and_signal_from_int) r0 = sp; sp += -12; - call _finish_atomic_sections; + + pseudo_long_call _finish_atomic_sections, p5; sp += 12; jump.s .Lresume_userspace; ENDPROC(_schedule_and_signal_from_int) diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S index cf037782c8ad..4391621d9048 100644 --- a/arch/blackfin/mach-common/head.S +++ b/arch/blackfin/mach-common/head.S @@ -186,6 +186,11 @@ ENTRY(__start) /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */ call _bfin_relocate_l1_mem; + +#ifdef CONFIG_ROMKERNEL + call _bfin_relocate_xip_data; +#endif + #ifdef CONFIG_BFIN_KERNEL_CLOCK /* Only use on-chip scratch space for stack when absolutely required * to avoid Anomaly 05000227 ... we know the init_clocks() func only diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S index 8085ff1cce00..df984960cf90 100644 --- a/arch/blackfin/mach-common/interrupt.S +++ b/arch/blackfin/mach-common/interrupt.S @@ -109,10 +109,10 @@ __common_int_entry: cc = r0 == 0; if cc jump .Lcommon_restore_context; #else /* CONFIG_IPIPE */ - call _do_irq; + pseudo_long_call _do_irq, p2; SP += 12; #endif /* CONFIG_IPIPE */ - call _return_from_int; + pseudo_long_call _return_from_int, p2; .Lcommon_restore_context: RESTORE_CONTEXT rti; @@ -168,7 +168,7 @@ ENTRY(_evt_ivhw) r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */ SP += -12; - call _trap_c; + pseudo_long_call _trap_c, p5; SP += 12; #ifdef EBIU_ERRMST @@ -179,7 +179,7 @@ ENTRY(_evt_ivhw) w[p0] = r0.l; #endif - call _ret_from_exception; + pseudo_long_call _ret_from_exception, p2; .Lcommon_restore_all_sys: RESTORE_ALL_SYS @@ -223,7 +223,7 @@ ENTRY(_evt_system_call) #ifdef CONFIG_FRAME_POINTER fp = 0; #endif - call _system_call; + pseudo_long_call _system_call, p2; jump .Lcommon_restore_context; ENDPROC(_evt_system_call) -- cgit v1.2.3 From 48a74f9d1b381471397097851531c4e392f97451 Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Fri, 8 Jan 2010 20:06:22 +0100 Subject: Blackfin: fix decoding of opcodes 41-47 in decode_instruction() This condition allowed only decoding of opcode 0x0040 Signed-off-by: Roel Kluin Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/traps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index d3cbcd6bd985..870d74b1b407 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c @@ -712,7 +712,7 @@ static void decode_instruction(unsigned short *address) verbose_printk("RTE"); else if (opcode == 0x0025) verbose_printk("EMUEXCPT"); - else if (opcode == 0x0040 && opcode <= 0x0047) + else if (opcode >= 0x0040 && opcode <= 0x0047) verbose_printk("STI R%i", opcode & 7); else if (opcode >= 0x0050 && opcode <= 0x0057) verbose_printk("JUMP (P%i)", opcode & 7); -- cgit v1.2.3 From 8d0177dbcbad26ffd451b607c8fa74295d81e7b9 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Sat, 9 Jan 2010 00:48:01 +0000 Subject: Blackfin: kgdb: mark all local funcs/structs static Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/kgdb.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/blackfin/kernel/kgdb.c 
b/arch/blackfin/kernel/kgdb.c index 34c7c3ed2c9c..2c501ceb1e55 100644 --- a/arch/blackfin/kernel/kgdb.c +++ b/arch/blackfin/kernel/kgdb.c @@ -145,7 +145,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) #endif } -struct hw_breakpoint { +static struct hw_breakpoint { unsigned int occupied:1; unsigned int skip:1; unsigned int enabled:1; @@ -155,7 +155,7 @@ struct hw_breakpoint { unsigned int addr; } breakinfo[HW_WATCHPOINT_NUM]; -int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type) +static int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type) { int breakno; int bfin_type; @@ -202,7 +202,7 @@ int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type) return -ENOSPC; } -int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type) +static int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type) { int breakno; int bfin_type; @@ -230,7 +230,7 @@ int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type) return 0; } -void bfin_remove_all_hw_break(void) +static void bfin_remove_all_hw_break(void) { int breakno; @@ -242,7 +242,7 @@ void bfin_remove_all_hw_break(void) breakinfo[breakno].type = TYPE_DATA_WATCHPOINT; } -void bfin_correct_hw_break(void) +static void bfin_correct_hw_break(void) { int breakno; unsigned int wpiactl = 0; -- cgit v1.2.3 From 7e1082b7dbb60081b157d82106da33686cb9ea91 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Tue, 12 Jan 2010 03:59:18 +0000 Subject: Blackfin: bf533-{ezkit,stamp}: add sound platform devices Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf533/boards/ezkit.c | 36 +++++++++++++++++++++++++++++++++ arch/blackfin/mach-bf533/boards/stamp.c | 36 +++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+) diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c index 5332c9828a33..84efda193847 100644 --- a/arch/blackfin/mach-bf533/boards/ezkit.c +++ b/arch/blackfin/mach-bf533/boards/ezkit.c @@ -473,6 +473,30 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { #endif }; +#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) +static struct platform_device bfin_i2s = { + .name = "bfin-i2s", + .id = CONFIG_SND_BF5XX_SPORT_NUM, + /* TODO: add platform data here */ +}; +#endif + +#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) +static struct platform_device bfin_tdm = { + .name = "bfin-tdm", + .id = CONFIG_SND_BF5XX_SPORT_NUM, + /* TODO: add platform data here */ +}; +#endif + +#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) +static struct platform_device bfin_ac97 = { + .name = "bfin-ac97", + .id = CONFIG_SND_BF5XX_SPORT_NUM, + /* TODO: add platform data here */ +}; +#endif + static struct platform_device *ezkit_devices[] __initdata = { &bfin_dpmc, @@ -518,6 +542,18 @@ static struct platform_device *ezkit_devices[] __initdata = { #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) &i2c_gpio_device, #endif + +#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) + &bfin_i2s, +#endif + +#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) + &bfin_tdm, +#endif + +#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) + &bfin_ac97, +#endif }; static int __init ezkit_init(void) diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c index 
3e80a795d3c7..e997d7f9a671 100644 --- a/arch/blackfin/mach-bf533/boards/stamp.c +++ b/arch/blackfin/mach-bf533/boards/stamp.c @@ -562,6 +562,30 @@ static struct platform_device bfin_dpmc = { }, }; +#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) +static struct platform_device bfin_i2s = { + .name = "bfin-i2s", + .id = CONFIG_SND_BF5XX_SPORT_NUM, + /* TODO: add platform data here */ +}; +#endif + +#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) +static struct platform_device bfin_tdm = { + .name = "bfin-tdm", + .id = CONFIG_SND_BF5XX_SPORT_NUM, + /* TODO: add platform data here */ +}; +#endif + +#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) +static struct platform_device bfin_ac97 = { + .name = "bfin-ac97", + .id = CONFIG_SND_BF5XX_SPORT_NUM, + /* TODO: add platform data here */ +}; +#endif + static struct platform_device *stamp_devices[] __initdata = { &bfin_dpmc, @@ -614,6 +638,18 @@ static struct platform_device *stamp_devices[] __initdata = { #if defined(CONFIG_MTD_BFIN_ASYNC) || defined(CONFIG_MTD_BFIN_ASYNC_MODULE) &stamp_flash_device, #endif + +#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) + &bfin_i2s, +#endif + +#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) + &bfin_tdm, +#endif + +#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) + &bfin_ac97, +#endif }; static int __init stamp_init(void) -- cgit v1.2.3 From c6669c223a409199a45bb8f31a2e160ee26d19cc Mon Sep 17 00:00:00 2001 From: Graf Yang Date: Wed, 13 Jan 2010 06:09:34 +0000 Subject: Blackfin: fix up mm locking in address dumping The locking code in the address dumper needs to grab the mm's mmap_sem so that other CPUs do not get an inconsistent view. On UP systems this really wasn't a problem, but it is easy to trigger a race on SMP systems when another CPU removes a mapping. Signed-off-by: Graf Yang Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/traps.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index 870d74b1b407..85766805b03a 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c @@ -138,6 +138,12 @@ static void decode_address(char *buf, unsigned long address) if (!mm) continue; + if (!down_read_trylock(&mm->mmap_sem)) { + if (!in_atomic) + mmput(mm); + continue; + } + for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { struct vm_area_struct *vma; @@ -177,6 +183,7 @@ static void decode_address(char *buf, unsigned long address) sprintf(buf, "[ %s vma:0x%lx-0x%lx]", name, vma->vm_start, vma->vm_end); + up_read(&mm->mmap_sem); if (!in_atomic) mmput(mm); @@ -186,11 +193,16 @@ static void decode_address(char *buf, unsigned long address) goto done; } } + + up_read(&mm->mmap_sem); if (!in_atomic) mmput(mm); } - /* we were unable to find this address anywhere */ + /* + * we were unable to find this address anywhere, + * or some MMs were skipped because they were in use. 
+ */ sprintf(buf, "/* kernel dynamic memory */"); done: -- cgit v1.2.3 From 29bb3bc0c71d867d50b18a0277bf28cd8dd3fca7 Mon Sep 17 00:00:00 2001 From: Cliff Cai Date: Thu, 14 Jan 2010 08:28:38 +0000 Subject: Blackfin: bf527-ezkit/bf537-stamp: add example SSM2602 I2C resources Signed-off-by: Cliff Cai Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf527/boards/ezkit.c | 5 +++++ arch/blackfin/mach-bf537/boards/stamp.c | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index a7beed517c2e..9f2cb2b86528 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -905,6 +905,11 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { .platform_data = (void *)&adp5520_pdev_data, }, #endif +#if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE) + { + I2C_BOARD_INFO("ssm2602", 0x1b), + }, +#endif }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 27b9c9ce8d06..d47f4ed6b935 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -1674,6 +1674,11 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { I2C_BOARD_INFO("ad5258", 0x18), }, #endif +#if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE) + { + I2C_BOARD_INFO("ssm2602", 0x1b), + }, +#endif }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) -- cgit v1.2.3 From 38e7673f24e6c19aba9ecff86760cb7981b91464 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Fri, 15 Jan 2010 03:24:39 +0000 Subject: Blackfin: bf537-stamp: declare parallel flash as ROM with XIP kernel When the kernel is executing out of parallel flash (XIP), we can't have the flash go into an erase/programming cycle, otherwise the instruction fetching steps fail and everything crashes. 
Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index d47f4ed6b935..26429702db58 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -455,6 +455,9 @@ static struct physmap_flash_data stamp_flash_data = { .width = 2, .parts = stamp_partitions, .nr_parts = ARRAY_SIZE(stamp_partitions), +#ifdef CONFIG_ROMKERNEL + .probe_type = "map_rom", +#endif }; static struct resource stamp_flash_resource = { -- cgit v1.2.3 From 5aff1642aee0fe3cb9be7339fcc09dd2bd1976f0 Mon Sep 17 00:00:00 2001 From: Harald Krapfenbauer Date: Fri, 15 Jan 2010 09:25:11 +0000 Subject: Blackfin: tcm-bf518: new board port Signed-off-by: Harald Krapfenbauer Signed-off-by: Mike Frysinger --- arch/blackfin/configs/TCM-BF518_defconfig | 1375 +++++++++++++++++++++++++++ arch/blackfin/mach-bf518/boards/Kconfig | 5 + arch/blackfin/mach-bf518/boards/Makefile | 1 + arch/blackfin/mach-bf518/boards/tcm-bf518.c | 753 +++++++++++++++ 4 files changed, 2134 insertions(+) create mode 100644 arch/blackfin/configs/TCM-BF518_defconfig create mode 100644 arch/blackfin/mach-bf518/boards/tcm-bf518.c diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig new file mode 100644 index 000000000000..4d31e2a4ed46 --- /dev/null +++ b/arch/blackfin/configs/TCM-BF518_defconfig @@ -0,0 +1,1375 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.32.3 +# +# CONFIG_MMU is not set +# CONFIG_FPU is not set +CONFIG_RWSEM_GENERIC_SPINLOCK=y +# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set +CONFIG_BLACKFIN=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_BUG=y +CONFIG_ZONE_DMA=y +CONFIG_GENERIC_FIND_NEXT_BIT=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_GENERIC_GPIO=y +CONFIG_FORCE_MAX_ZONEORDER=14 +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +# CONFIG_KERNEL_GZIP is not set +# CONFIG_KERNEL_BZIP2 is not set +CONFIG_KERNEL_LZMA=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_TREE_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_GROUP_SCHED is not set +# CONFIG_CGROUPS is not set +# CONFIG_SYSFS_DEPRECATED_V2 is not set +# CONFIG_RELAY is not set +# CONFIG_NAMESPACES is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +# CONFIG_RD_GZIP is not set +# CONFIG_RD_BZIP2 is not set +CONFIG_RD_LZMA=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +CONFIG_EMBEDDED=y +CONFIG_UID16=y +# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y 
+CONFIG_BUG=y +# CONFIG_ELF_CORE is not set +CONFIG_BASE_FULL=y +# CONFIG_FUTEX is not set +CONFIG_EPOLL=y +# CONFIG_SIGNALFD is not set +# CONFIG_TIMERFD is not set +# CONFIG_EVENTFD is not set +# CONFIG_AIO is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +CONFIG_MMAP_ALLOW_UNINITIALIZED=y +# CONFIG_PROFILING is not set +CONFIG_HAVE_OPROFILE=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# CONFIG_SLOW_WORK is not set +# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set +CONFIG_SLABINFO=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +# CONFIG_LBDAF is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_AS is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +# CONFIG_DEFAULT_AS is not set +# CONFIG_DEFAULT_DEADLINE is not set +# CONFIG_DEFAULT_CFQ is not set +CONFIG_DEFAULT_NOOP=y +CONFIG_DEFAULT_IOSCHED="noop" +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +# CONFIG_FREEZER is not set + +# +# Blackfin Processor Options +# + +# +# Processor and Board Settings +# +# CONFIG_BF512 is not set +# CONFIG_BF514 is not set +# CONFIG_BF516 is not set +CONFIG_BF518=y +# CONFIG_BF522 is not set +# CONFIG_BF523 is not set +# CONFIG_BF524 is not set +# CONFIG_BF525 is not set +# CONFIG_BF526 is not set +# CONFIG_BF527 is not set +# CONFIG_BF531 is not set +# CONFIG_BF532 is not set +# CONFIG_BF533 is not set +# CONFIG_BF534 is not set +# CONFIG_BF536 is not set +# CONFIG_BF537 is not set +# CONFIG_BF538 is not set +# CONFIG_BF539 is not set +# CONFIG_BF542_std is not set +# CONFIG_BF542M is not set +# CONFIG_BF544_std is not set +# CONFIG_BF544M is not set +# CONFIG_BF547_std is not set +# CONFIG_BF547M is not set +# CONFIG_BF548_std is not set +# CONFIG_BF548M is not set +# CONFIG_BF549_std is not set +# CONFIG_BF549M is not set +# CONFIG_BF561 is not set +CONFIG_BF_REV_MIN=0 +CONFIG_BF_REV_MAX=2 +# CONFIG_BF_REV_0_0 is not set +CONFIG_BF_REV_0_1=y +# CONFIG_BF_REV_0_2 is not set +# CONFIG_BF_REV_0_3 is not set +# CONFIG_BF_REV_0_4 is not set +# CONFIG_BF_REV_0_5 is not set +# CONFIG_BF_REV_0_6 is not set +# CONFIG_BF_REV_ANY is not set +# CONFIG_BF_REV_NONE is not set +CONFIG_BF51x=y +# CONFIG_BFIN518F_EZBRD is not set +CONFIG_BFIN518F_TCM=y + +# +# BF518 Specific Configuration +# + +# +# Alternative Multiplexing Scheme +# +# CONFIG_BF518_SPORT0_PORTF is not set +CONFIG_BF518_SPORT0_PORTG=y +CONFIG_BF518_SPORT0_TSCLK_PG10=y +# CONFIG_BF518_SPORT0_TSCLK_PG14 is not set +CONFIG_BF518_UART1_PORTF=y +# CONFIG_BF518_UART1_PORTG is not set + +# +# Interrupt Priority Assignment +# + +# +# Priority +# +CONFIG_IRQ_PLL_WAKEUP=7 +CONFIG_IRQ_DMA0_ERROR=7 +CONFIG_IRQ_DMAR0_BLK=7 +CONFIG_IRQ_DMAR1_BLK=7 +CONFIG_IRQ_DMAR0_OVR=7 +CONFIG_IRQ_DMAR1_OVR=7 +CONFIG_IRQ_PPI_ERROR=7 +CONFIG_IRQ_MAC_ERROR=7 +CONFIG_IRQ_SPORT0_ERROR=7 +CONFIG_IRQ_SPORT1_ERROR=7 +CONFIG_IRQ_PTP_ERROR=7 +CONFIG_IRQ_UART0_ERROR=7 +CONFIG_IRQ_UART1_ERROR=7 +CONFIG_IRQ_RTC=8 +CONFIG_IRQ_PPI=8 +CONFIG_IRQ_SPORT0_RX=9 +CONFIG_IRQ_SPORT0_TX=9 +CONFIG_IRQ_SPORT1_RX=9 +CONFIG_IRQ_SPORT1_TX=9 +CONFIG_IRQ_TWI=10 +CONFIG_IRQ_SPI0=10 +CONFIG_IRQ_UART0_RX=10 
+CONFIG_IRQ_UART0_TX=10 +CONFIG_IRQ_UART1_RX=10 +CONFIG_IRQ_UART1_TX=10 +CONFIG_IRQ_OPTSEC=11 +CONFIG_IRQ_CNT=11 +CONFIG_IRQ_MAC_RX=11 +CONFIG_IRQ_PORTH_INTA=11 +CONFIG_IRQ_MAC_TX=11 +CONFIG_IRQ_PORTH_INTB=11 +CONFIG_IRQ_TIMER0=12 +CONFIG_IRQ_TIMER1=12 +CONFIG_IRQ_TIMER2=12 +CONFIG_IRQ_TIMER3=12 +CONFIG_IRQ_TIMER4=12 +CONFIG_IRQ_TIMER5=12 +CONFIG_IRQ_TIMER6=12 +CONFIG_IRQ_TIMER7=12 +CONFIG_IRQ_PORTG_INTA=12 +CONFIG_IRQ_PORTG_INTB=12 +CONFIG_IRQ_MEM_DMA0=13 +CONFIG_IRQ_MEM_DMA1=13 +CONFIG_IRQ_WATCH=13 +CONFIG_IRQ_PORTF_INTA=13 +CONFIG_IRQ_PORTF_INTB=13 +CONFIG_IRQ_SPI0_ERROR=7 +CONFIG_IRQ_SPI1_ERROR=7 +CONFIG_IRQ_RSI_INT0=7 +CONFIG_IRQ_RSI_INT1=7 +CONFIG_IRQ_PWM_TRIP=10 +CONFIG_IRQ_PWM_SYNC=10 +CONFIG_IRQ_PTP_STAT=10 + +# +# Board customizations +# +# CONFIG_CMDLINE_BOOL is not set +CONFIG_BOOT_LOAD=0x1000 + +# +# Clock/PLL Setup +# +CONFIG_CLKIN_HZ=25000000 +# CONFIG_BFIN_KERNEL_CLOCK is not set +CONFIG_MAX_VCO_HZ=400000000 +CONFIG_MIN_VCO_HZ=50000000 +CONFIG_MAX_SCLK_HZ=133333333 +CONFIG_MIN_SCLK_HZ=27000000 + +# +# Kernel Timer/Scheduler +# +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +# CONFIG_SCHED_HRTICK is not set +CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_CLOCKEVENTS=y + +# +# Clock event device +# +# CONFIG_TICKSOURCE_GPTMR0 is not set +CONFIG_TICKSOURCE_CORETMR=y + +# +# Clock souce +# +# CONFIG_CYCLES_CLOCKSOURCE is not set +# CONFIG_GPTMR0_CLOCKSOURCE is not set +# CONFIG_NO_HZ is not set +# CONFIG_HIGH_RES_TIMERS is not set +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y + +# +# Misc +# +CONFIG_BFIN_SCRATCH_REG_RETN=y +# CONFIG_BFIN_SCRATCH_REG_RETE is not set +# CONFIG_BFIN_SCRATCH_REG_CYCLES is not set + +# +# Blackfin Kernel Optimizations +# + +# +# Memory Optimizations +# +CONFIG_I_ENTRY_L1=y +CONFIG_EXCPT_IRQ_SYSC_L1=y +CONFIG_DO_IRQ_L1=y +CONFIG_CORE_TIMER_IRQ_L1=y +CONFIG_IDLE_L1=y +# CONFIG_SCHEDULE_L1 is not set +CONFIG_ARITHMETIC_OPS_L1=y +CONFIG_ACCESS_OK_L1=y +# CONFIG_MEMSET_L1 is not set +# CONFIG_MEMCPY_L1 is not set +# CONFIG_SYS_BFIN_SPINLOCK_L1 is not set +# CONFIG_IP_CHECKSUM_L1 is not set +CONFIG_CACHELINE_ALIGNED_L1=y +# CONFIG_SYSCALL_TAB_L1 is not set +# CONFIG_CPLB_SWITCH_TAB_L1 is not set +CONFIG_APP_STACK_L1=y + +# +# Speed Optimizations +# +CONFIG_BFIN_INS_LOWOVERHEAD=y +CONFIG_RAMKERNEL=y +# CONFIG_ROMKERNEL is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=1 +CONFIG_VIRT_TO_BUS=y +CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0 +CONFIG_BFIN_GPTIMERS=m +# CONFIG_DMA_UNCACHED_4M is not set +# CONFIG_DMA_UNCACHED_2M is not set +CONFIG_DMA_UNCACHED_1M=y +# CONFIG_DMA_UNCACHED_512K is not set +# CONFIG_DMA_UNCACHED_256K is not set +# CONFIG_DMA_UNCACHED_128K is not set +# CONFIG_DMA_UNCACHED_NONE is not set + +# +# Cache Support +# +CONFIG_BFIN_ICACHE=y +CONFIG_BFIN_EXTMEM_ICACHEABLE=y +CONFIG_BFIN_DCACHE=y +# CONFIG_BFIN_DCACHE_BANKA is not set +CONFIG_BFIN_EXTMEM_DCACHEABLE=y +CONFIG_BFIN_EXTMEM_WRITEBACK=y +# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set + +# +# Memory Protection Unit +# +# CONFIG_MPU is not set + +# +# Asynchronous Memory Configuration +# + +# +# EBIU_AMGCTL Global Control +# +CONFIG_C_AMCKEN=y +CONFIG_C_CDPRIO=y +# CONFIG_C_AMBEN is not set +# CONFIG_C_AMBEN_B0 is not set +# CONFIG_C_AMBEN_B0_B1 is not set +# CONFIG_C_AMBEN_B0_B1_B2 is not 
set +CONFIG_C_AMBEN_ALL=y + +# +# EBIU_AMBCTL Control +# +CONFIG_BANK_0=0x7BB0 +CONFIG_BANK_1=0x7BB0 +CONFIG_BANK_2=0x7BB0 +CONFIG_BANK_3=0x99B2 + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF_FDPIC=y +CONFIG_BINFMT_FLAT=y +CONFIG_BINFMT_ZFLAT=y +# CONFIG_BINFMT_SHARED_FLAT is not set +# CONFIG_HAVE_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +# CONFIG_PM is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +# CONFIG_INET_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +# CONFIG_IPV6 is not set +# CONFIG_NETLABEL is not set +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +# CONFIG_FW_LOADER is not set +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y 
+CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_NOSWAP=y +# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set +CONFIG_MTD_CFI_GEOMETRY=y +# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set +CONFIG_MTD_MAP_BANK_WIDTH_2=y +# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +# CONFIG_MTD_CFI_I2 is not set +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_OTP is not set +CONFIG_MTD_CFI_INTELEXT=y +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +CONFIG_MTD_CFI_UTIL=y +CONFIG_MTD_RAM=y +CONFIG_MTD_ROM=m +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=y +# CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_UCLINUX is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +# CONFIG_MTD_NAND is not set +# CONFIG_MTD_ONENAND is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + +# +# UBI - Unsorted block images +# +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_BLK_DEV_HD is not set +CONFIG_MISC_DEVICES=y +# CONFIG_AD525X_DPOT is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_ISL29003 is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +# CONFIG_MD is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_VETH is not set +CONFIG_PHYLIB=y + +# +# MII PHY device drivers +# +# CONFIG_MARVELL_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_ICPLUS_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_FIXED_PHY is not set +# CONFIG_MDIO_BITBANG is not set +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +CONFIG_BFIN_MAC=y +CONFIG_BFIN_TX_DESC_NUM=10 +CONFIG_BFIN_RX_DESC_NUM=20 +# CONFIG_BFIN_MAC_RMII is not set +CONFIG_BFIN_MAC_USE_HWSTAMP=y +# CONFIG_SMC91X is not set +# CONFIG_DM9000 is not set +# CONFIG_ENC28J60 is not 
set +# CONFIG_ETHOC is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_ADF702X is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_KS8851 is not set +# CONFIG_KS8851_MLL is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set +# CONFIG_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_ISDN is not set +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_UINPUT is not set +# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_PCF8574 is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_BFIN_DMA_INTERFACE=m +# CONFIG_BFIN_PPI is not set +# CONFIG_BFIN_PPIFCD is not set +# CONFIG_BFIN_SIMPLE_TIMER is not set +# CONFIG_BFIN_SPI_ADC is not set +# CONFIG_BFIN_SPORT is not set +# CONFIG_BFIN_TWI_LCD is not set +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_VT_HW_CONSOLE_BINDING is not set +# CONFIG_DEVKMEM is not set +CONFIG_BFIN_JTAG_COMM=m +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +CONFIG_SERIAL_BFIN=y +CONFIG_SERIAL_BFIN_CONSOLE=y +CONFIG_SERIAL_BFIN_DMA=y +# CONFIG_SERIAL_BFIN_PIO is not set +CONFIG_SERIAL_BFIN_UART0=y +# CONFIG_BFIN_UART0_CTSRTS is not set +# CONFIG_SERIAL_BFIN_UART1 is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_BFIN_SPORT is not set +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_BFIN_OTP=y +# CONFIG_BFIN_OTP_WRITE_ENABLE is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_HELPER_AUTO=y + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +CONFIG_I2C_BLACKFIN_TWI=y +CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_SIMTEC is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_STUB is 
not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_DS1682 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +CONFIG_SPI_BFIN=y +CONFIG_SPI_BFIN_LOCK=y +# CONFIG_SPI_BFIN_SPORT is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set + +# +# PPS support +# +# CONFIG_PPS is not set +CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y + +# +# Memory mapped GPIO expanders: +# + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_ADP5588 is not set + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_GPIO_MC33880 is not set + +# +# AC97 GPIO expanders: +# +# CONFIG_W1 is not set +# CONFIG_POWER_SUPPLY is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +CONFIG_WATCHDOG=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +CONFIG_BFIN_WDT=y +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_TPS65010 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_REGULATOR is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +# CONFIG_FB is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +# CONFIG_SOUND is not set +# CONFIG_HID_SUPPORT is not set +# CONFIG_USB_SUPPORT is not set +CONFIG_MMC=y +CONFIG_MMC_DEBUG=y +# CONFIG_MMC_UNSAFE_RESUME is not set + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_BOUNCE=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +# CONFIG_MMC_AT91 is not set +# CONFIG_MMC_ATMELMCI is not set +CONFIG_MMC_SPI=y +# CONFIG_SDH_BFIN is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# 
CONFIG_RTC_DRV_X1205 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_DS3234 is not set +# CONFIG_RTC_DRV_PCF2123 is not set + +# +# Platform RTC drivers +# +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_V3020 is not set + +# +# on-CPU RTC drivers +# +CONFIG_RTC_DRV_BFIN=y +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_UIO is not set + +# +# TI VLYNQ +# +# CONFIG_STAGING is not set + +# +# Firmware Drivers +# +# CONFIG_FIRMWARE_MEMMAP is not set +# CONFIG_SIGMA is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_EXT4_FS is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_FS_POSIX_ACL is not set +# CONFIG_XFS_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +# CONFIG_DNOTIFY is not set +CONFIG_INOTIFY=y +CONFIG_INOTIFY_USER=y +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +# CONFIG_MSDOS_FS is not set +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_SYSFS=y +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +# CONFIG_NFS_V4 is not set +CONFIG_ROOT_NFS=y +# CONFIG_NFSD is not set +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_RPCSEC_GSS_SPKM3 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=m +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=m +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# 
CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=m +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=m +# CONFIG_DLM is not set + +# +# Kernel hacking +# +# CONFIG_PRINTK_TIME is not set +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_DEBUG_FS=y +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +CONFIG_SCHED_DEBUG=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_TIMER_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_NOMMU_REGIONS is not set +# CONFIG_DEBUG_WRITECOUNT is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_CREDENTIALS is not set +# CONFIG_FRAME_POINTER is not set +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_DEBUG_STACKOVERFLOW is not set +# CONFIG_DEBUG_STACK_USAGE is not set +CONFIG_DEBUG_VERBOSE=y +CONFIG_DEBUG_MMRS=y +CONFIG_DEBUG_HWERR=y +CONFIG_EXACT_HWERR=y +CONFIG_DEBUG_DOUBLEFAULT=y +CONFIG_DEBUG_DOUBLEFAULT_PRINT=y +# CONFIG_DEBUG_DOUBLEFAULT_RESET is not set +# CONFIG_DEBUG_ICACHE_CHECK is not set +CONFIG_DEBUG_HUNT_FOR_ZERO=y +CONFIG_DEBUG_BFIN_HWTRACE_ON=y +# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_OFF is not set +CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y +# CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_TWO is not set +CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION=1 +# CONFIG_DEBUG_BFIN_HWTRACE_EXPAND is not set +CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE=y +CONFIG_EARLY_PRINTK=y +CONFIG_CPLB_INFO=y +CONFIG_ACCESS_CHECK=y +# 
CONFIG_BFIN_ISRAM_SELF_TEST is not set + +# +# Security options +# +# CONFIG_KEYS is not set +CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set +# CONFIG_SECURITY_FILE_CAPABILITIES is not set +# CONFIG_SECURITY_TOMOYO is not set +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_MANAGER is not set +# CONFIG_CRYPTO_MANAGER2 is not set +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_CRYPTD is not set +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +# CONFIG_CRYPTO_CBC is not set +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +# CONFIG_CRYPTO_ECB is not set +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +# CONFIG_CRYPTO_HMAC is not set +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_GHASH is not set +# CONFIG_CRYPTO_MD4 is not set +# CONFIG_CRYPTO_MD5 is not set +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +# CONFIG_CRYPTO_SHA1 is not set +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_ANUBIS is not set +# CONFIG_CRYPTO_ARC4 is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +# CONFIG_CRYPTO_DES is not set +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +# CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_HW=y +# CONFIG_BINARY_PRINTF is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +CONFIG_CRC_CCITT=m +# CONFIG_CRC16 is not set +# CONFIG_CRC_T10DIF is not set +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +CONFIG_CRC7=y +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/blackfin/mach-bf518/boards/Kconfig b/arch/blackfin/mach-bf518/boards/Kconfig index 96163514ed22..252261ec04c4 100644 --- a/arch/blackfin/mach-bf518/boards/Kconfig +++ b/arch/blackfin/mach-bf518/boards/Kconfig @@ -9,4 +9,9 @@ config BFIN518F_EZBRD help BF518-EZBRD board support. +config BFIN518F_TCM + bool "Bluetechnix TCM-BF518" + help + Bluetechnix TCM-BF518 board support. 
+ endchoice diff --git a/arch/blackfin/mach-bf518/boards/Makefile b/arch/blackfin/mach-bf518/boards/Makefile index 172e859c3a7f..a9ef25c6b302 100644 --- a/arch/blackfin/mach-bf518/boards/Makefile +++ b/arch/blackfin/mach-bf518/boards/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_BFIN518F_EZBRD) += ezbrd.o +obj-$(CONFIG_BFIN518F_TCM) += tcm-bf518.o diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c new file mode 100644 index 000000000000..9b72e5cb21fe --- /dev/null +++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c @@ -0,0 +1,753 @@ +/* + * Copyright 2004-2009 Analog Devices Inc. + * 2005 National ICT Australia (NICTA) + * Aidan Williams + * + * Licensed under the GPL-2 or later. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Name the Board for the /proc/cpuinfo + */ +const char bfin_board_name[] = "Bluetechnix TCM-BF518"; + +/* + * Driver needs to know address, irq and flag pin. + */ + +#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) +static struct mtd_partition tcm_partitions[] = { + { + .name = "bootloader(nor)", + .size = 0x40000, + .offset = 0, + }, + { + .name = "linux(nor)", + .size = 0x1C0000, + .offset = MTDPART_OFS_APPEND, + } +}; + +static struct physmap_flash_data tcm_flash_data = { + .width = 2, + .parts = tcm_partitions, + .nr_parts = ARRAY_SIZE(tcm_partitions), +}; + +static struct resource tcm_flash_resource = { + .start = 0x20000000, + .end = 0x201fffff, + .flags = IORESOURCE_MEM, +}; + +static struct platform_device tcm_flash_device = { + .name = "physmap-flash", + .id = 0, + .dev = { + .platform_data = &tcm_flash_data, + }, + .num_resources = 1, + .resource = &tcm_flash_resource, +}; +#endif + +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) +static struct platform_device rtc_device = { + .name = "rtc-bfin", + .id = -1, +}; +#endif + +#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) +static struct platform_device bfin_mii_bus = { + .name = "bfin_mii_bus", +}; + +static struct platform_device bfin_mac_device = { + .name = "bfin_mac", + .dev.platform_data = &bfin_mii_bus, +}; +#endif + +#if defined(CONFIG_MTD_M25P80) \ + || defined(CONFIG_MTD_M25P80_MODULE) +static struct mtd_partition bfin_spi_flash_partitions[] = { + { + .name = "bootloader(spi)", + .size = 0x00040000, + .offset = 0, + .mask_flags = MTD_CAP_ROM + }, { + .name = "linux kernel(spi)", + .size = MTDPART_SIZ_FULL, + .offset = MTDPART_OFS_APPEND, + } +}; + +static struct flash_platform_data bfin_spi_flash_data = { + .name = "m25p80", + .parts = bfin_spi_flash_partitions, + .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), + .type = "m25p16", +}; + +/* SPI flash chip (m25p64) */ +static struct bfin5xx_spi_chip spi_flash_chip_info = { + .enable_dma = 0, /* use dma transfer with this chip*/ + .bits_per_word = 8, +}; +#endif + +#if defined(CONFIG_BFIN_SPI_ADC) \ + || defined(CONFIG_BFIN_SPI_ADC_MODULE) +/* SPI ADC chip */ +static struct bfin5xx_spi_chip spi_adc_chip_info = { + .enable_dma = 1, /* use dma transfer with this chip*/ + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) +static struct bfin5xx_spi_chip mmc_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 8, +}; +#endif + +#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) +static struct bfin5xx_spi_chip 
spi_ad7877_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; + +static const struct ad7877_platform_data bfin_ad7877_ts_info = { + .model = 7877, + .vref_delay_usecs = 50, /* internal, no capacitor */ + .x_plate_ohms = 419, + .y_plate_ohms = 486, + .pressure_max = 1000, + .pressure_min = 0, + .stopacq_polarity = 1, + .first_conversion_delay = 3, + .acquisition_time = 1, + .averaging = 1, + .pen_down_acc_interval = 1, +}; +#endif + +#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \ + && defined(CONFIG_SND_SOC_WM8731_SPI) +static struct bfin5xx_spi_chip spi_wm8731_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + +#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) +static struct bfin5xx_spi_chip spidev_chip_info = { + .enable_dma = 0, + .bits_per_word = 8, +}; +#endif + +static struct spi_board_info bfin_spi_board_info[] __initdata = { +#if defined(CONFIG_MTD_M25P80) \ + || defined(CONFIG_MTD_M25P80_MODULE) + { + /* the modalias must be the same as spi device driver name */ + .modalias = "m25p80", /* Name of spi_driver for this device */ + .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 0, /* Framework bus number */ + .chip_select = 2, /* SPI0_SSEL2 */ + .platform_data = &bfin_spi_flash_data, + .controller_data = &spi_flash_chip_info, + .mode = SPI_MODE_3, + }, +#endif + +#if defined(CONFIG_BFIN_SPI_ADC) \ + || defined(CONFIG_BFIN_SPI_ADC_MODULE) + { + .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ + .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 0, /* Framework bus number */ + .chip_select = 1, /* Framework chip select. */ + .platform_data = NULL, /* No spi_driver specific config */ + .controller_data = &spi_adc_chip_info, + }, +#endif + +#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) + { + .modalias = "mmc_spi", + .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 0, + .chip_select = 5, + .controller_data = &mmc_spi_chip_info, + .mode = SPI_MODE_3, + }, +#endif +#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) + { + .modalias = "ad7877", + .platform_data = &bfin_ad7877_ts_info, + .irq = IRQ_PF8, + .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 0, + .chip_select = 2, + .controller_data = &spi_ad7877_chip_info, + }, +#endif +#if defined(CONFIG_SND_SOC_WM8731) || defined(CONFIG_SND_SOC_WM8731_MODULE) \ + && defined(CONFIG_SND_SOC_WM8731_SPI) + { + .modalias = "wm8731", + .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 0, + .chip_select = 5, + .controller_data = &spi_wm8731_chip_info, + .mode = SPI_MODE_0, + }, +#endif +#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) + { + .modalias = "spidev", + .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 0, + .chip_select = 1, + .controller_data = &spidev_chip_info, + }, +#endif +#if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) + { + .modalias = "bfin-lq035q1-spi", + .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 0, + .chip_select = 1, + .controller_data = &lq035q1_spi_chip_info, + .mode = SPI_CPHA | SPI_CPOL, + }, +#endif +}; + +/* SPI controller data */ +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) +/* SPI (0) */ +static struct bfin5xx_spi_master bfin_spi0_info = { + .num_chipselect = 6, + .enable_dma = 1, /* master has the ability to do dma 
transfer */ + .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, +}; + +static struct resource bfin_spi0_resource[] = { + [0] = { + .start = SPI0_REGBASE, + .end = SPI0_REGBASE + 0xFF, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = CH_SPI0, + .end = CH_SPI0, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI0, + .end = IRQ_SPI0, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_spi0_device = { + .name = "bfin-spi", + .id = 0, /* Bus number */ + .num_resources = ARRAY_SIZE(bfin_spi0_resource), + .resource = bfin_spi0_resource, + .dev = { + .platform_data = &bfin_spi0_info, /* Passed to driver */ + }, +}; + +/* SPI (1) */ +static struct bfin5xx_spi_master bfin_spi1_info = { + .num_chipselect = 5, + .enable_dma = 1, /* master has the ability to do dma transfer */ + .pin_req = {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0}, +}; + +static struct resource bfin_spi1_resource[] = { + [0] = { + .start = SPI1_REGBASE, + .end = SPI1_REGBASE + 0xFF, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = CH_SPI1, + .end = CH_SPI1, + .flags = IORESOURCE_DMA, + }, + [2] = { + .start = IRQ_SPI1, + .end = IRQ_SPI1, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device bfin_spi1_device = { + .name = "bfin-spi", + .id = 1, /* Bus number */ + .num_resources = ARRAY_SIZE(bfin_spi1_resource), + .resource = bfin_spi1_resource, + .dev = { + .platform_data = &bfin_spi1_info, /* Passed to driver */ + }, +}; +#endif /* spi master and devices */ + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) +#ifdef CONFIG_SERIAL_BFIN_UART0 +static struct resource bfin_uart0_resources[] = { + { + .start = UART0_THR, + .end = UART0_GCTL+2, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_UART0_RX, + .end = IRQ_UART0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART0_ERROR, + .end = IRQ_UART0_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_TX, + .end = CH_UART0_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart0_peripherals[] = { + P_UART0_TX, P_UART0_RX, 0 +}; + +static struct platform_device bfin_uart0_device = { + .name = "bfin-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_uart0_resources), + .resource = bfin_uart0_resources, + .dev = { + .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 +static struct resource bfin_uart1_resources[] = { + { + .start = UART1_THR, + .end = UART1_GCTL+2, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_UART1_RX, + .end = IRQ_UART1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_UART1_ERROR, + .end = IRQ_UART1_ERROR, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART1_TX, + .end = CH_UART1_TX, + .flags = IORESOURCE_DMA, + }, + { + .start = CH_UART1_RX, + .end = CH_UART1_RX, + .flags = IORESOURCE_DMA, + }, +}; + +unsigned short bfin_uart1_peripherals[] = { + P_UART1_TX, P_UART1_RX, 0 +}; + +static struct platform_device bfin_uart1_device = { + .name = "bfin-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_uart1_resources), + .resource = bfin_uart1_resources, + .dev = { + .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ + }, +}; +#endif +#endif + +#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) +#ifdef CONFIG_BFIN_SIR0 +static struct resource bfin_sir0_resources[] = { + { + .start = 0xFFC00400, + .end = 0xFFC004FF, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_UART0_RX, + .end 
= IRQ_UART0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART0_RX, + .end = CH_UART0_RX+1, + .flags = IORESOURCE_DMA, + }, +}; + +static struct platform_device bfin_sir0_device = { + .name = "bfin_sir", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_sir0_resources), + .resource = bfin_sir0_resources, +}; +#endif +#ifdef CONFIG_BFIN_SIR1 +static struct resource bfin_sir1_resources[] = { + { + .start = 0xFFC02000, + .end = 0xFFC020FF, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_UART1_RX, + .end = IRQ_UART1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = CH_UART1_RX, + .end = CH_UART1_RX+1, + .flags = IORESOURCE_DMA, + }, +}; + +static struct platform_device bfin_sir1_device = { + .name = "bfin_sir", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_sir1_resources), + .resource = bfin_sir1_resources, +}; +#endif +#endif + +#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) +static struct resource bfin_twi0_resource[] = { + [0] = { + .start = TWI0_REGBASE, + .end = TWI0_REGBASE, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = IRQ_TWI, + .end = IRQ_TWI, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device i2c_bfin_twi_device = { + .name = "i2c-bfin-twi", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_twi0_resource), + .resource = bfin_twi0_resource, +}; +#endif + +static struct i2c_board_info __initdata bfin_i2c_board_info[] = { +#if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) + { + I2C_BOARD_INFO("pcf8574_lcd", 0x22), + }, +#endif +#if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE) + { + I2C_BOARD_INFO("pcf8574_keypad", 0x27), + .irq = IRQ_PF8, + }, +#endif +}; + +#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART +static struct resource bfin_sport0_uart_resources[] = { + { + .start = SPORT0_TCR1, + .end = SPORT0_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT0_RX, + .end = IRQ_SPORT0_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT0_ERROR, + .end = IRQ_SPORT0_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport0_peripherals[] = { + P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, + P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 +}; + +static struct platform_device bfin_sport0_uart_device = { + .name = "bfin-sport-uart", + .id = 0, + .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), + .resource = bfin_sport0_uart_resources, + .dev = { + .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ + }, +}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART +static struct resource bfin_sport1_uart_resources[] = { + { + .start = SPORT1_TCR1, + .end = SPORT1_MRCS3+4, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_SPORT1_RX, + .end = IRQ_SPORT1_RX+1, + .flags = IORESOURCE_IRQ, + }, + { + .start = IRQ_SPORT1_ERROR, + .end = IRQ_SPORT1_ERROR, + .flags = IORESOURCE_IRQ, + }, +}; + +unsigned short bfin_sport1_peripherals[] = { + P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, + P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 +}; + +static struct platform_device bfin_sport1_uart_device = { + .name = "bfin-sport-uart", + .id = 1, + .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), + .resource = bfin_sport1_uart_resources, + .dev = { + .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ + }, +}; +#endif +#endif + +#if defined(CONFIG_KEYBOARD_GPIO) || 
defined(CONFIG_KEYBOARD_GPIO_MODULE) +#include +#include + +static struct gpio_keys_button bfin_gpio_keys_table[] = { + {BTN_0, GPIO_PG0, 1, "gpio-keys: BTN0"}, + {BTN_1, GPIO_PG13, 1, "gpio-keys: BTN1"}, +}; + +static struct gpio_keys_platform_data bfin_gpio_keys_data = { + .buttons = bfin_gpio_keys_table, + .nbuttons = ARRAY_SIZE(bfin_gpio_keys_table), +}; + +static struct platform_device bfin_device_gpiokeys = { + .name = "gpio-keys", + .dev = { + .platform_data = &bfin_gpio_keys_data, + }, +}; +#endif + +#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE) + +static struct bfin_sd_host bfin_sdh_data = { + .dma_chan = CH_RSI, + .irq_int0 = IRQ_RSI_INT0, + .pin_req = {P_RSI_DATA0, P_RSI_DATA1, P_RSI_DATA2, P_RSI_DATA3, P_RSI_CMD, P_RSI_CLK, 0}, +}; + +static struct platform_device bf51x_sdh_device = { + .name = "bfin-sdh", + .id = 0, + .dev = { + .platform_data = &bfin_sdh_data, + }, +}; +#endif + +static const unsigned int cclk_vlev_datasheet[] = +{ + VRPAIR(VLEV_100, 400000000), + VRPAIR(VLEV_105, 426000000), + VRPAIR(VLEV_110, 500000000), + VRPAIR(VLEV_115, 533000000), + VRPAIR(VLEV_120, 600000000), +}; + +static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { + .tuple_tab = cclk_vlev_datasheet, + .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), + .vr_settling_time = 25 /* us */, +}; + +static struct platform_device bfin_dpmc = { + .name = "bfin dpmc", + .dev = { + .platform_data = &bfin_dmpc_vreg_data, + }, +}; + +static struct platform_device *tcm_devices[] __initdata = { + + &bfin_dpmc, + +#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) + &rtc_device, +#endif + +#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) + &bfin_mii_bus, + &bfin_mac_device, +#endif + +#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) + &bfin_spi0_device, + &bfin_spi1_device, +#endif + +#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + &bfin_uart1_device, +#endif +#endif + +#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) +#ifdef CONFIG_BFIN_SIR0 + &bfin_sir0_device, +#endif +#ifdef CONFIG_BFIN_SIR1 + &bfin_sir1_device, +#endif +#endif + +#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) + &i2c_bfin_twi_device, +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#endif + +#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) + &bfin_device_gpiokeys, +#endif + +#if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE) + &bf51x_sdh_device, +#endif + +#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) + &tcm_flash_device, +#endif +}; + +static int __init tcm_init(void) +{ + printk(KERN_INFO "%s(): registering device resources\n", __func__); + i2c_register_board_info(0, bfin_i2c_board_info, + ARRAY_SIZE(bfin_i2c_board_info)); + platform_add_devices(tcm_devices, ARRAY_SIZE(tcm_devices)); + spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); + return 0; +} + +arch_initcall(tcm_init); + +static struct platform_device *tcm_early_devices[] __initdata = { +#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) +#ifdef CONFIG_SERIAL_BFIN_UART0 + &bfin_uart0_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_UART1 + 
&bfin_uart1_device, +#endif +#endif + +#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART + &bfin_sport0_uart_device, +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART + &bfin_sport1_uart_device, +#endif +#endif +}; + +void __init native_machine_early_platform_add_devices(void) +{ + printk(KERN_INFO "register early platform devices\n"); + early_platform_add_devices(tcm_early_devices, + ARRAY_SIZE(tcm_early_devices)); +} + +void native_machine_restart(char *cmd) +{ + /* workaround reboot hang when booting from SPI */ + if ((bfin_read_SYSCR() & 0x7) == 0x3) + bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); +} + +void bfin_get_ether_addr(char *addr) +{ + random_ether_addr(addr); + printk(KERN_WARNING "%s:%s: Setting Ethernet MAC to a random one\n", __FILE__, __func__); +} +EXPORT_SYMBOL(bfin_get_ether_addr); -- cgit v1.2.3 From 441504df6b81302c06010083c8d63b1465ead405 Mon Sep 17 00:00:00 2001 From: Yi Li Date: Tue, 19 Jan 2010 04:35:28 +0000 Subject: Blackfin: add support for irqflags tracing Signed-off-by: Yi Li Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/context.S | 12 ++++++++++++ arch/blackfin/mach-common/entry.S | 15 +++++++++++++++ arch/blackfin/mach-common/interrupt.S | 7 +++++++ 3 files changed, 34 insertions(+) diff --git a/arch/blackfin/include/asm/context.S b/arch/blackfin/include/asm/context.S index fada8e974a73..1f9060395a0a 100644 --- a/arch/blackfin/include/asm/context.S +++ b/arch/blackfin/include/asm/context.S @@ -72,6 +72,11 @@ sti r0; #else cli r0; +#endif +#ifdef CONFIG_TRACE_IRQFLAGS + sp += -12; + call _trace_hardirqs_off; + sp += 12; #endif [--sp] = RETI; /*orig_pc*/ /* Clear all L registers. */ @@ -279,6 +284,13 @@ RETN = [sp++]; RETX = [sp++]; RETI = [sp++]; + +#ifdef CONFIG_TRACE_IRQFLAGS + sp += -12; + call _trace_hardirqs_on; + sp += 12; +#endif + RETS = [sp++]; #ifdef CONFIG_SMP diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index ccfa7c490ff9..1fa414f7852f 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S @@ -965,6 +965,13 @@ ENTRY(_evt_evt14) sti r0; #else cli r0; +#endif +#ifdef CONFIG_TRACE_IRQFLAGS + [--sp] = rets; + sp += -12; + call _trace_hardirqs_off; + sp += 12; + rets = [sp++]; #endif [--sp] = RETI; SP += 4; @@ -989,6 +996,14 @@ ENTRY(_schedule_and_signal_from_int) p1 = rets; [sp + PT_RESERVED] = p1; +#ifdef CONFIG_TRACE_IRQFLAGS + /* trace_hardirqs_on() checks if all irqs are disabled. But here IRQ 15 + * is turned on, so disable all irqs. */ + cli r0; + sp += -12; + call _trace_hardirqs_on; + sp += 12; +#endif #ifdef CONFIG_SMP GET_PDA(p0, r0); /* Fetch current PDA (can't migrate to other CPU here) */ r0 = [p0 + PDA_IRQFLAGS]; diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S index df984960cf90..0a0c088ead8c 100644 --- a/arch/blackfin/mach-common/interrupt.S +++ b/arch/blackfin/mach-common/interrupt.S @@ -87,6 +87,13 @@ __common_int_entry: sti r1; #else cli r1; +#endif +#ifdef CONFIG_TRACE_IRQFLAGS + [--sp] = r0; + sp += -12; + call _trace_hardirqs_off; + sp += 12; + r0 = [sp++]; #endif [--sp] = RETI; /* orig_pc */ /* Clear all L registers. */ -- cgit v1.2.3 From 0531c467da6438f278e9420a30f73eea2fdb73bd Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Tue, 19 Jan 2010 07:04:29 +0000 Subject: Blackfin: bf537-stamp: move ADF7021 MAC storage into platform resources Just generate a random MAC on the demo board since the ADF702x lacks dedicated storage for such things. 
Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 26429702db58..f78861a617d9 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -22,6 +22,7 @@ #endif #include #include +#include #include #include #include @@ -816,6 +817,12 @@ static struct adf702x_platform_data adf7021_platform_data = { .adf702x_regs = adf7021_regs, .tx_reg = TXREG, }; +static inline void adf702x_mac_init(void) +{ + random_ether_addr(adf7021_platform_data.mac_addr); +} +#else +static inline void adf702x_mac_init(void) {} #endif #if defined(CONFIG_MTD_DATAFLASH) \ @@ -1967,6 +1974,7 @@ static int __init stamp_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); bfin_plat_nand_init(); + adf702x_mac_init(); platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); i2c_register_board_info(0, bfin_i2c_board_info, ARRAY_SIZE(bfin_i2c_board_info)); -- cgit v1.2.3 From 5e8d3210b5bffbe64afca9152241284a46611c7e Mon Sep 17 00:00:00 2001 From: Barry Song Date: Tue, 19 Jan 2010 11:01:08 +0000 Subject: Blackfin: fill out the signal si_addr when sending a SIGBUS/SIGSEGV Some userspace applications use this member in diagnosing crashes. It also makes some LTP tests pass (i.e. the Blackfin arch behaves more like everyone else). Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/traps.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index 85766805b03a..4eaca2d1deed 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c @@ -260,9 +260,7 @@ asmlinkage notrace void trap_c(struct pt_regs *fp) #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON int j; #endif -#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO unsigned int cpu = raw_smp_processor_id(); -#endif const char *strerror = NULL; int sig = 0; siginfo_t info; @@ -651,7 +649,17 @@ asmlinkage notrace void trap_c(struct pt_regs *fp) { info.si_signo = sig; info.si_errno = 0; - info.si_addr = (void __user *)fp->pc; + switch (trapnr) { + case VEC_CPLB_VL: + case VEC_MISALI_D: + case VEC_CPLB_M: + case VEC_CPLB_MHIT: + info.si_addr = (void __user *)cpu_pda[cpu].dcplb_fault_addr; + break; + default: + info.si_addr = (void __user *)fp->pc; + break; + } force_sig_info(sig, &info, current); } -- cgit v1.2.3 From f3dec78333d6369161eb833dbd8c8f006f359fdf Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Tue, 19 Jan 2010 14:45:38 +0000 Subject: Blackfin: increase NR_IRQS beyond NR on-chip IRQs This makes room for off-chip IRQ controllers. 
Signed-off-by: Michael Hennerich Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/irq.h | 3 +++ arch/blackfin/mach-bf518/include/mach/irq.h | 3 ++- arch/blackfin/mach-bf527/include/mach/irq.h | 3 ++- arch/blackfin/mach-bf533/include/mach/irq.h | 3 ++- arch/blackfin/mach-bf537/include/mach/irq.h | 3 ++- arch/blackfin/mach-bf538/include/mach/irq.h | 3 ++- arch/blackfin/mach-bf548/include/mach/irq.h | 3 ++- arch/blackfin/mach-bf561/include/mach/irq.h | 3 ++- arch/blackfin/mach-common/ints-priority.c | 2 +- 9 files changed, 18 insertions(+), 8 deletions(-) diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h index e7c0623f9091..89de539ed010 100644 --- a/arch/blackfin/include/asm/irq.h +++ b/arch/blackfin/include/asm/irq.h @@ -12,6 +12,9 @@ #include +/* IRQs that may be used by external irq_chip controllers */ +#define NR_SPARE_IRQS 32 + #include /* SYS_IRQS and NR_IRQS are defined in */ diff --git a/arch/blackfin/mach-bf518/include/mach/irq.h b/arch/blackfin/mach-bf518/include/mach/irq.h index 14e52ec7afa5..52edc8483911 100644 --- a/arch/blackfin/mach-bf518/include/mach/irq.h +++ b/arch/blackfin/mach-bf518/include/mach/irq.h @@ -151,7 +151,8 @@ #define GPIO_IRQ_BASE IRQ_PF0 -#define NR_IRQS (IRQ_PH15 + 1) +#define NR_MACH_IRQS (IRQ_PH15 + 1) +#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS) #define IVG7 7 #define IVG8 8 diff --git a/arch/blackfin/mach-bf527/include/mach/irq.h b/arch/blackfin/mach-bf527/include/mach/irq.h index aa6579a64a2f..17604b4a81cc 100644 --- a/arch/blackfin/mach-bf527/include/mach/irq.h +++ b/arch/blackfin/mach-bf527/include/mach/irq.h @@ -151,7 +151,8 @@ #define GPIO_IRQ_BASE IRQ_PF0 -#define NR_IRQS (IRQ_PH15+1) +#define NR_MACH_IRQS (IRQ_PH15 + 1) +#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS) #define IVG7 7 #define IVG8 8 diff --git a/arch/blackfin/mach-bf533/include/mach/irq.h b/arch/blackfin/mach-bf533/include/mach/irq.h index c31498be0bbb..1f7e9765d954 100644 --- a/arch/blackfin/mach-bf533/include/mach/irq.h +++ b/arch/blackfin/mach-bf533/include/mach/irq.h @@ -104,7 +104,8 @@ Core Emulation ** #define GPIO_IRQ_BASE IRQ_PF0 -#define NR_IRQS (IRQ_PF15+1) +#define NR_MACH_IRQS (IRQ_PF15 + 1) +#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS) #define IVG7 7 #define IVG8 8 diff --git a/arch/blackfin/mach-bf537/include/mach/irq.h b/arch/blackfin/mach-bf537/include/mach/irq.h index 0defa9457e7f..9b2cbcac98e0 100644 --- a/arch/blackfin/mach-bf537/include/mach/irq.h +++ b/arch/blackfin/mach-bf537/include/mach/irq.h @@ -134,7 +134,8 @@ #define GPIO_IRQ_BASE IRQ_PF0 -#define NR_IRQS (IRQ_PH15+1) +#define NR_MACH_IRQS (IRQ_PH15 + 1) +#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS) #define IVG7 7 #define IVG8 8 diff --git a/arch/blackfin/mach-bf538/include/mach/irq.h b/arch/blackfin/mach-bf538/include/mach/irq.h index a4b7fcbc556b..7a479d224dc7 100644 --- a/arch/blackfin/mach-bf538/include/mach/irq.h +++ b/arch/blackfin/mach-bf538/include/mach/irq.h @@ -110,7 +110,8 @@ #define GPIO_IRQ_BASE IRQ_PF0 -#define NR_IRQS (IRQ_PF15+1) +#define NR_MACH_IRQS (IRQ_PF15 + 1) +#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS) #define IVG7 7 #define IVG8 8 diff --git a/arch/blackfin/mach-bf548/include/mach/irq.h b/arch/blackfin/mach-bf548/include/mach/irq.h index 106db05684ae..1f99b51a3d56 100644 --- a/arch/blackfin/mach-bf548/include/mach/irq.h +++ b/arch/blackfin/mach-bf548/include/mach/irq.h @@ -317,7 +317,8 @@ Events (highest priority) EMU 0 #define GPIO_IRQ_BASE IRQ_PA0 -#define NR_IRQS (IRQ_PJ15+1) +#define NR_MACH_IRQS (IRQ_PJ15 + 1) +#define NR_IRQS 
(NR_MACH_IRQS + NR_SPARE_IRQS) /* For compatibility reasons with existing code */ diff --git a/arch/blackfin/mach-bf561/include/mach/irq.h b/arch/blackfin/mach-bf561/include/mach/irq.h index 7b208db267bf..c95566ade51b 100644 --- a/arch/blackfin/mach-bf561/include/mach/irq.h +++ b/arch/blackfin/mach-bf561/include/mach/irq.h @@ -265,7 +265,8 @@ #define GPIO_IRQ_BASE IRQ_PF0 -#define NR_IRQS (IRQ_PF47 + 1) +#define NR_MACH_IRQS (IRQ_PF47 + 1) +#define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS) #define IVG7 7 #define IVG8 8 diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index 3589fe8b86ba..ebf886091187 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c @@ -1114,7 +1114,7 @@ int __init init_arch_irq(void) #endif /* if configured as edge, then will be changed to do_edge_IRQ */ - for (irq = GPIO_IRQ_BASE; irq < NR_IRQS; irq++) + for (irq = GPIO_IRQ_BASE; irq < NR_MACH_IRQS; irq++) set_irq_chip_and_handler(irq, &bfin_gpio_irqchip, handle_level_irq); -- cgit v1.2.3 From 439b486746df9f57524e002dfd6117bbc040e925 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Fri, 13 Nov 2009 02:41:07 +0000 Subject: Blackfin: bf548-ezkit: add ASoC CPU DAI resources Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf548/boards/ezkit.c | 36 +++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c index b5d8ea187735..5b3929957073 100644 --- a/arch/blackfin/mach-bf548/boards/ezkit.c +++ b/arch/blackfin/mach-bf548/boards/ezkit.c @@ -1257,6 +1257,30 @@ static struct platform_device bfin_dpmc = { }, }; +#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) +static struct platform_device bfin_i2s = { + .name = "bfin-i2s", + .id = CONFIG_SND_BF5XX_SPORT_NUM, + /* TODO: add platform data here */ +}; +#endif + +#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) +static struct platform_device bfin_tdm = { + .name = "bfin-tdm", + .id = CONFIG_SND_BF5XX_SPORT_NUM, + /* TODO: add platform data here */ +}; +#endif + +#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) +static struct platform_device bfin_ac97 = { + .name = "bfin-ac97", + .id = CONFIG_SND_BF5XX_SPORT_NUM, + /* TODO: add platform data here */ +}; +#endif + static struct platform_device *ezkit_devices[] __initdata = { &bfin_dpmc, @@ -1369,6 +1393,18 @@ static struct platform_device *ezkit_devices[] __initdata = { #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) &ezkit_flash_device, #endif + +#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) + &bfin_i2s, +#endif + +#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) + &bfin_tdm, +#endif + +#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) + &bfin_ac97, +#endif }; static int __init ezkit_init(void) -- cgit v1.2.3 From 336746ed8ee8ef503ba79bc4b6f0b5a40e8ab3ce Mon Sep 17 00:00:00 2001 From: Barry Song Date: Tue, 13 Oct 2009 09:19:18 +0000 Subject: Blackfin: bf537-stamp/bf527-ezkit: move ASoC SPORT resources to boards file Rather than declaring pin resources in the drivers, do it in the board. 
Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf527/boards/ezkit.c | 24 ++++++++++++++++++++++++ arch/blackfin/mach-bf537/boards/stamp.c | 25 +++++++++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index 9f2cb2b86528..3d32a35427a6 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -494,6 +494,22 @@ static struct bfin5xx_spi_chip spidev_chip_info = { }; #endif +#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) +static struct platform_device bfin_i2s = { + .name = "bfin-i2s", + .id = CONFIG_SND_BF5XX_SPORT_NUM, + /* TODO: add platform data here */ +}; +#endif + +#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) +static struct platform_device bfin_tdm = { + .name = "bfin-tdm", + .id = CONFIG_SND_BF5XX_SPORT_NUM, + /* TODO: add platform data here */ +}; +#endif + #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) static struct bfin5xx_spi_chip lq035q1_spi_chip_info = { .enable_dma = 0, @@ -1152,6 +1168,14 @@ static struct platform_device *stamp_devices[] __initdata = { #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) &ezkit_flash_device, #endif + +#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) + &bfin_i2s, +#endif + +#if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) + &bfin_tdm, +#endif }; static int __init ezkit_init(void) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index f78861a617d9..80b3b577f74e 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -1849,9 +1849,26 @@ static struct platform_device bfin_dpmc = { }, }; +#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) +static struct platform_device bfin_i2s = { + .name = "bfin-i2s", + .id = CONFIG_SND_BF5XX_SPORT_NUM, + /* TODO: add platform data here */ +}; +#endif + #if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) static struct platform_device bfin_tdm = { .name = "bfin-tdm", + .id = CONFIG_SND_BF5XX_SPORT_NUM, + /* TODO: add platform data here */ +}; +#endif + +#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) +static struct platform_device bfin_ac97 = { + .name = "bfin-ac97", + .id = CONFIG_SND_BF5XX_SPORT_NUM, /* TODO: add platform data here */ }; #endif @@ -1965,9 +1982,17 @@ static struct platform_device *stamp_devices[] __initdata = { &stamp_flash_device, #endif +#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) + &bfin_i2s, +#endif + #if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) &bfin_tdm, #endif + +#if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) + &bfin_ac97, +#endif }; static int __init stamp_init(void) -- cgit v1.2.3 From 726e96561e4704278bc5197238f6459e1a63aa77 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Wed, 20 Jan 2010 07:25:31 +0000 Subject: Blackfin: respect the L1 kconfig optimization in the MPU code Restore support for CONFIG_EXCPT_IRQ_SYSC_L1 in the MPU CPLB manager. 
Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/cplb-mpu/cplbmgr.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c index 7e6383dc7b20..87b25b1b30ed 100644 --- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c +++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c @@ -31,6 +31,12 @@ int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS]; int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS]; int nr_cplb_flush[NR_CPUS]; +#ifdef CONFIG_EXCPT_IRQ_SYSC_L1 +#define MGR_ATTR __attribute__((l1_text)) +#else +#define MGR_ATTR +#endif + /* * Given the contents of the status register, return the index of the * CPLB that caused the fault. @@ -59,7 +65,7 @@ static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS]; /* * Find an ICPLB entry to be evicted and return its index. */ -static int evict_one_icplb(unsigned int cpu) +MGR_ATTR static int evict_one_icplb(unsigned int cpu) { int i; for (i = first_switched_icplb; i < MAX_CPLBS; i++) @@ -74,7 +80,7 @@ static int evict_one_icplb(unsigned int cpu) return i; } -static int evict_one_dcplb(unsigned int cpu) +MGR_ATTR static int evict_one_dcplb(unsigned int cpu) { int i; for (i = first_switched_dcplb; i < MAX_CPLBS; i++) @@ -89,7 +95,7 @@ static int evict_one_dcplb(unsigned int cpu) return i; } -static noinline int dcplb_miss(unsigned int cpu) +MGR_ATTR static noinline int dcplb_miss(unsigned int cpu) { unsigned long addr = bfin_read_DCPLB_FAULT_ADDR(); int status = bfin_read_DCPLB_STATUS(); @@ -163,7 +169,7 @@ static noinline int dcplb_miss(unsigned int cpu) return 0; } -static noinline int icplb_miss(unsigned int cpu) +MGR_ATTR static noinline int icplb_miss(unsigned int cpu) { unsigned long addr = bfin_read_ICPLB_FAULT_ADDR(); int status = bfin_read_ICPLB_STATUS(); @@ -269,7 +275,7 @@ static noinline int icplb_miss(unsigned int cpu) return 0; } -static noinline int dcplb_protection_fault(unsigned int cpu) +MGR_ATTR static noinline int dcplb_protection_fault(unsigned int cpu) { int status = bfin_read_DCPLB_STATUS(); @@ -289,7 +295,7 @@ static noinline int dcplb_protection_fault(unsigned int cpu) return CPLB_PROT_VIOL; } -int cplb_hdr(int seqstat, struct pt_regs *regs) +MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs) { int cause = seqstat & 0x3f; unsigned int cpu = raw_smp_processor_id(); -- cgit v1.2.3 From 60ffdb36547da2397d6cfefe9c752ebad16524f6 Mon Sep 17 00:00:00 2001 From: Graf Yang Date: Wed, 20 Jan 2010 10:56:24 +0000 Subject: Blackfin: implement nmi_watchdog for SMP on BF561 Signed-off-by: Graf Yang Signed-off-by: Mike Frysinger --- arch/blackfin/Kconfig.debug | 9 + arch/blackfin/include/asm/irq.h | 4 + arch/blackfin/include/asm/nmi.h | 12 ++ arch/blackfin/include/asm/smp.h | 1 + arch/blackfin/kernel/Makefile | 1 + arch/blackfin/kernel/nmi.c | 313 ++++++++++++++++++++++++++++++++++ arch/blackfin/kernel/time-ts.c | 4 + arch/blackfin/mach-common/interrupt.S | 18 +- 8 files changed, 361 insertions(+), 1 deletion(-) create mode 100644 arch/blackfin/include/asm/nmi.h create mode 100644 arch/blackfin/kernel/nmi.c diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug index 87f195ee2e06..1460d7b5edc1 100644 --- a/arch/blackfin/Kconfig.debug +++ b/arch/blackfin/Kconfig.debug @@ -238,6 +238,15 @@ config EARLY_PRINTK all of this lives in the init section and is thrown away after the kernel boots completely. 
+config NMI_WATCHDOG + bool "Enable NMI watchdog to help debugging lockup on SMP" + default n + depends on (SMP && !BFIN_SCRATCH_REG_RETN) + help + If any CPU in the system does not execute the period local timer + interrupt for more than 5 seconds, then the NMI handler dumps debug + information. This information can be used to debug the lockup. + config CPLB_INFO bool "Display the CPLB information" help diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h index 89de539ed010..12f4060a31b0 100644 --- a/arch/blackfin/include/asm/irq.h +++ b/arch/blackfin/include/asm/irq.h @@ -38,4 +38,8 @@ #include +#ifdef CONFIG_NMI_WATCHDOG +# define ARCH_HAS_NMI_WATCHDOG +#endif + #endif /* _BFIN_IRQ_H_ */ diff --git a/arch/blackfin/include/asm/nmi.h b/arch/blackfin/include/asm/nmi.h new file mode 100644 index 000000000000..b9caac4fcfd8 --- /dev/null +++ b/arch/blackfin/include/asm/nmi.h @@ -0,0 +1,12 @@ +/* + * Copyright 2010 Analog Devices Inc. + * + * Licensed under the GPL-2 + */ + +#ifndef _BFIN_NMI_H_ +#define _BFIN_NMI_H_ + +#include + +#endif diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h index 29fb88219470..7f26de09ca9c 100644 --- a/arch/blackfin/include/asm/smp.h +++ b/arch/blackfin/include/asm/smp.h @@ -22,6 +22,7 @@ extern char coreb_trampoline_start, coreb_trampoline_end; struct corelock_slot { int lock; }; +extern struct corelock_slot corelock; void smp_icache_flush_range_others(unsigned long start, unsigned long end); diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile index a8ddbc8ed5af..346a421f1562 100644 --- a/arch/blackfin/kernel/Makefile +++ b/arch/blackfin/kernel/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_CPLB_INFO) += cplbinfo.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o +obj-$(CONFIG_NMI_WATCHDOG) += nmi.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_EARLY_PRINTK) += shadow_console.o obj-$(CONFIG_STACKTRACE) += stacktrace.o diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c new file mode 100644 index 000000000000..19093c17632b --- /dev/null +++ b/arch/blackfin/kernel/nmi.c @@ -0,0 +1,313 @@ +/* + * Blackfin nmi_watchdog Driver + * + * Originally based on bfin_wdt.c + * Copyright 2010-2010 Analog Devices Inc. + * Graff Yang + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Licensed under the GPL-2 or later. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Bit in WDOG_CTL that indicates watchdog has expired (WDR0) */ +#define WDOG_EXPIRED 0x8000 + +/* Masks for WDEV field in WDOG_CTL register */ +#define ICTL_RESET 0x0 +#define ICTL_NMI 0x2 +#define ICTL_GPI 0x4 +#define ICTL_NONE 0x6 +#define ICTL_MASK 0x6 + +/* Masks for WDEN field in WDOG_CTL register */ +#define WDEN_MASK 0x0FF0 +#define WDEN_ENABLE 0x0000 +#define WDEN_DISABLE 0x0AD0 + +#define DRV_NAME "nmi-wdt" + +#define NMI_WDT_TIMEOUT 5 /* 5 seconds */ +#define NMI_CHECK_TIMEOUT (4 * HZ) /* 4 seconds in jiffies */ +static int nmi_wdt_cpu = 1; + +static unsigned int timeout = NMI_WDT_TIMEOUT; +static int nmi_active; + +static unsigned short wdoga_ctl; +static unsigned int wdoga_cnt; +static struct corelock_slot saved_corelock; +static atomic_t nmi_touched[NR_CPUS]; +static struct timer_list ntimer; + +enum { + COREA_ENTER_NMI = 0, + COREA_EXIT_NMI, + COREB_EXIT_NMI, + + NMI_EVENT_NR, +}; +static unsigned long nmi_event __attribute__ ((__section__(".l2.bss"))); + +/* we are in nmi, non-atomic bit ops is safe */ +static inline void set_nmi_event(int event) +{ + __set_bit(event, &nmi_event); +} + +static inline void wait_nmi_event(int event) +{ + while (!test_bit(event, &nmi_event)) + barrier(); + __clear_bit(event, &nmi_event); +} + +static inline void send_corea_nmi(void) +{ + wdoga_ctl = bfin_read_WDOGA_CTL(); + wdoga_cnt = bfin_read_WDOGA_CNT(); + + bfin_write_WDOGA_CTL(WDEN_DISABLE); + bfin_write_WDOGA_CNT(0); + bfin_write_WDOGA_CTL(WDEN_ENABLE | ICTL_NMI); +} + +static inline void restore_corea_nmi(void) +{ + bfin_write_WDOGA_CTL(WDEN_DISABLE); + bfin_write_WDOGA_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE); + + bfin_write_WDOGA_CNT(wdoga_cnt); + bfin_write_WDOGA_CTL(wdoga_ctl); +} + +static inline void save_corelock(void) +{ + saved_corelock = corelock; + corelock.lock = 0; +} + +static inline void restore_corelock(void) +{ + corelock = saved_corelock; +} + + +static inline void nmi_wdt_keepalive(void) +{ + bfin_write_WDOGB_STAT(0); +} + +static inline void nmi_wdt_stop(void) +{ + bfin_write_WDOGB_CTL(WDEN_DISABLE); +} + +/* before calling this function, you must stop the WDT */ +static inline void nmi_wdt_clear(void) +{ + /* clear TRO bit, disable event generation */ + bfin_write_WDOGB_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE); +} + +static inline void nmi_wdt_start(void) +{ + bfin_write_WDOGB_CTL(WDEN_ENABLE | ICTL_NMI); +} + +static inline int nmi_wdt_running(void) +{ + return ((bfin_read_WDOGB_CTL() & WDEN_MASK) != WDEN_DISABLE); +} + +static inline int nmi_wdt_set_timeout(unsigned long t) +{ + u32 cnt, max_t, sclk; + int run; + + sclk = get_sclk(); + max_t = -1 / sclk; + cnt = t * sclk; + if (t > max_t) { + pr_warning("NMI: timeout value is too large\n"); + return -EINVAL; + } + + run = nmi_wdt_running(); + nmi_wdt_stop(); + bfin_write_WDOGB_CNT(cnt); + if (run) + nmi_wdt_start(); + + timeout = t; + + return 0; +} + +int check_nmi_wdt_touched(void) +{ + unsigned int this_cpu = smp_processor_id(); + unsigned int cpu; + + cpumask_t mask = cpu_online_map; + + if (!atomic_read(&nmi_touched[this_cpu])) + return 0; + + atomic_set(&nmi_touched[this_cpu], 0); + + cpu_clear(this_cpu, mask); + for_each_cpu_mask(cpu, mask) { + invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]), + (unsigned long)(&nmi_touched[cpu])); + if (!atomic_read(&nmi_touched[cpu])) + return 0; + atomic_set(&nmi_touched[cpu], 0); + } + + return 1; +} + +static void nmi_wdt_timer(unsigned 
long data) +{ + if (check_nmi_wdt_touched()) + nmi_wdt_keepalive(); + + mod_timer(&ntimer, jiffies + NMI_CHECK_TIMEOUT); +} + +static int __init init_nmi_wdt(void) +{ + nmi_wdt_set_timeout(timeout); + nmi_wdt_start(); + nmi_active = true; + + init_timer(&ntimer); + ntimer.function = nmi_wdt_timer; + ntimer.expires = jiffies + NMI_CHECK_TIMEOUT; + add_timer(&ntimer); + + pr_info("nmi_wdt: initialized: timeout=%d sec\n", timeout); + return 0; +} +device_initcall(init_nmi_wdt); + +void touch_nmi_watchdog(void) +{ + atomic_set(&nmi_touched[smp_processor_id()], 1); +} + +/* Suspend/resume support */ +#ifdef CONFIG_PM +static int nmi_wdt_suspend(struct sys_device *dev, pm_message_t state) +{ + nmi_wdt_stop(); + return 0; +} + +static int nmi_wdt_resume(struct sys_device *dev) +{ + if (nmi_active) + nmi_wdt_start(); + return 0; +} + +static struct sysdev_class nmi_sysclass = { + .name = DRV_NAME, + .resume = nmi_wdt_resume, + .suspend = nmi_wdt_suspend, +}; + +static struct sys_device device_nmi_wdt = { + .id = 0, + .cls = &nmi_sysclass, +}; + +static int __init init_nmi_wdt_sysfs(void) +{ + int error; + + if (!nmi_active) + return 0; + + error = sysdev_class_register(&nmi_sysclass); + if (!error) + error = sysdev_register(&device_nmi_wdt); + return error; +} +late_initcall(init_nmi_wdt_sysfs); + +#endif /* CONFIG_PM */ + + +asmlinkage notrace void do_nmi(struct pt_regs *fp) +{ + unsigned int cpu = smp_processor_id(); + nmi_enter(); + + cpu_pda[cpu].__nmi_count += 1; + + if (cpu == nmi_wdt_cpu) { + /* CoreB goes here first */ + + /* reload the WDOG_STAT */ + nmi_wdt_keepalive(); + + /* clear nmi interrupt for CoreB */ + nmi_wdt_stop(); + nmi_wdt_clear(); + + /* trigger NMI interrupt of CoreA */ + send_corea_nmi(); + + /* waiting CoreB to enter NMI */ + wait_nmi_event(COREA_ENTER_NMI); + + /* recover WDOGA's settings */ + restore_corea_nmi(); + + save_corelock(); + + /* corelock is save/cleared, CoreA is dummping messages */ + + wait_nmi_event(COREA_EXIT_NMI); + } else { + /* OK, CoreA entered NMI */ + set_nmi_event(COREA_ENTER_NMI); + } + + pr_emerg("\nNMI Watchdog detected LOCKUP, dump for CPU %d\n", cpu); + dump_bfin_process(fp); + dump_bfin_mem(fp); + show_regs(fp); + dump_bfin_trace_buffer(); + show_stack(current, (unsigned long *)fp); + + if (cpu == nmi_wdt_cpu) { + pr_emerg("This fault is not recoverable, sorry!\n"); + + /* CoreA dump finished, restore the corelock */ + restore_corelock(); + + set_nmi_event(COREB_EXIT_NMI); + } else { + /* CoreB dump finished, notice the CoreA we are done */ + set_nmi_event(COREA_EXIT_NMI); + + /* synchronize with CoreA */ + wait_nmi_event(COREB_EXIT_NMI); + } + + nmi_exit(); +} diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c index a351f97c87a3..41a907596c70 100644 --- a/arch/blackfin/kernel/time-ts.c +++ b/arch/blackfin/kernel/time-ts.c @@ -21,6 +21,7 @@ #include #include #include +#include /* Accelerators for sched_clock() * convert from cycles(64bits) => nanoseconds (64bits) @@ -309,6 +310,9 @@ irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id) smp_mb(); evt->event_handler(evt); + + touch_nmi_watchdog(); + return IRQ_HANDLED; } diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S index 0a0c088ead8c..cee62cf4acd4 100644 --- a/arch/blackfin/mach-common/interrupt.S +++ b/arch/blackfin/mach-common/interrupt.S @@ -194,12 +194,28 @@ ENTRY(_evt_ivhw) ENDPROC(_evt_ivhw) /* Interrupt routine for evt2 (NMI). - * We don't actually use this, so just return. 
* For inner circle type details, please see: * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi */ ENTRY(_evt_nmi) +#ifndef CONFIG_NMI_WATCHDOG .weak _evt_nmi +#else + /* Not take account of CPLBs, this handler will not return */ + SAVE_ALL_SYS + r0 = sp; + r1 = retn; + [sp + PT_PC] = r1; + trace_buffer_save(p4,r5); + + ANOMALY_283_315_WORKAROUND(p4, r5) + + SP += -12; + call _do_nmi; + SP += 12; +1: + jump 1b; +#endif rtn; ENDPROC(_evt_nmi) -- cgit v1.2.3 From 6388d14eb2dd3af655cee28ca2a1c56881e63e56 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Thu, 21 Jan 2010 07:20:44 +0000 Subject: Blackfin: implement ndelay() Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/delay.h | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/arch/blackfin/include/asm/delay.h b/arch/blackfin/include/asm/delay.h index c31f91cc1d5d..171d8deb04a5 100644 --- a/arch/blackfin/include/asm/delay.h +++ b/arch/blackfin/include/asm/delay.h @@ -30,10 +30,22 @@ __asm__ __volatile__ ( #define HZSCALE (268435456 / (1000000/HZ)) -static inline void udelay(unsigned long usecs) +static inline unsigned long __to_delay(unsigned long scale) { extern unsigned long loops_per_jiffy; - __delay((((usecs * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6); + return (((scale * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6; +} + +static inline void udelay(unsigned long usecs) +{ + __delay(__to_delay(usecs)); } +static inline void ndelay(unsigned long nsecs) +{ + __delay(__to_delay(1) * nsecs / 1000); +} + +#define ndelay ndelay + #endif -- cgit v1.2.3 From aebfef03249819886a7f9c981940cbd48d82ea47 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Fri, 22 Jan 2010 07:35:20 -0500 Subject: Blackfin: implement ftrace mcount test Signed-off-by: Mike Frysinger --- arch/blackfin/Kconfig | 1 + arch/blackfin/kernel/ftrace-entry.S | 11 ++++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index f46db59eac8a..3123aa49ff79 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -25,6 +25,7 @@ config BLACKFIN def_bool y select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER + select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_IDE select HAVE_KERNEL_GZIP if RAMKERNEL select HAVE_KERNEL_BZIP2 if RAMKERNEL diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S index 76dd4fbcd17a..db3a51b0af21 100644 --- a/arch/blackfin/kernel/ftrace-entry.S +++ b/arch/blackfin/kernel/ftrace-entry.S @@ -1,7 +1,7 @@ /* * mcount and friends -- ftrace stuff * - * Copyright (C) 2009 Analog Devices Inc. + * Copyright (C) 2009-2010 Analog Devices Inc. * Licensed under the GPL-2 or later. */ @@ -21,6 +21,15 @@ * function will be waiting there. mmmm pie. */ ENTRY(__mcount) +#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST + /* optional micro optimization: return if stopped */ + p1.l = _function_trace_stop; + p1.h = _function_trace_stop; + r3 = [p1]; + cc = r3 == 0; + if ! 
cc jump _ftrace_stub (bp); +#endif + + /* save third function arg early so we can do testing below */ [--sp] = r2; -- cgit v1.2.3 From b73faf74493fd1bc75a8938aa5d296facf50a650 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Fri, 22 Jan 2010 07:59:32 -0500 Subject: Blackfin: support new ftrace frame pointer semantics Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/ftrace-entry.S | 12 +++++++++--- arch/blackfin/kernel/ftrace.c | 6 ++++-- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S index db3a51b0af21..d66446b572c0 100644 --- a/arch/blackfin/kernel/ftrace-entry.S +++ b/arch/blackfin/kernel/ftrace-entry.S @@ -115,9 +115,12 @@ ENTRY(_ftrace_graph_caller) [--sp] = r1; [--sp] = rets; - /* prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) */ - r0 = sp; - r1 = rets; + /* prepare_ftrace_return(parent, self_addr, frame_pointer) */ + r0 = sp; /* unsigned long *parent */ + r1 = rets; /* unsigned long self_addr */ +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST + r2 = fp; /* unsigned long frame_pointer */ +#endif r0 += 16; /* skip the 4 local regs on stack */ r1 += -MCOUNT_INSN_SIZE; call _prepare_ftrace_return; @@ -136,6 +139,9 @@ ENTRY(_return_to_handler) [--sp] = r1; /* get original return address */ +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST + r0 = fp; /* Blackfin is sane, so omit this */ +#endif call _ftrace_return_to_handler; rets = r0; diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c index f2c85ac6f2da..a61d948ea925 100644 --- a/arch/blackfin/kernel/ftrace.c +++ b/arch/blackfin/kernel/ftrace.c @@ -16,7 +16,8 @@ * Hook the return address and push it in the stack of return addrs * in current thread info. */ -void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, + unsigned long frame_pointer) { struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long)&return_to_handler; @@ -24,7 +25,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) if (unlikely(atomic_read(&current->tracing_graph_pause))) return; - if (ftrace_push_return_trace(*parent, self_addr, &trace.depth, 0) == -EBUSY) + if (ftrace_push_return_trace(*parent, self_addr, &trace.depth, + frame_pointer) == -EBUSY) return; trace.func = self_addr; -- cgit v1.2.3 From 3750411fb7ca96dd11ce8a63f213b60bfe4b5c29 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Fri, 22 Jan 2010 09:11:20 -0500 Subject: Blackfin: fix whitespace damage in thread_info.h Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/thread_info.h | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h index a40d9368c38a..0149c92ceeb0 100644 --- a/arch/blackfin/include/asm/thread_info.h +++ b/arch/blackfin/include/asm/thread_info.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2009 Analog Devices Inc. + * Copyright 2004-2010 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ @@ -17,7 +17,7 @@ /* Thread Align Mask to reach to the top of the stack * for any process */ -#define ALIGN_PAGE_MASK 0xffffe000 +#define ALIGN_PAGE_MASK 0xffffe000 /* * Size of kernel stack for each process. This must be a power of 2...
@@ -57,7 +57,7 @@ struct thread_info { .exec_domain = &default_exec_domain, \ .flags = 0, \ .cpu = 0, \ - .preempt_count = INIT_PREEMPT_COUNT, \ + .preempt_count = INIT_PREEMPT_COUNT, \ .restart_block = { \ .fn = do_no_restart_syscall, \ }, \ @@ -73,8 +73,7 @@ __attribute_const__ static inline struct thread_info *current_thread_info(void) { struct thread_info *ti; - __asm__("%0 = sp;" : "=da"(ti) : - ); + __asm__("%0 = sp;" : "=da"(ti)); return (struct thread_info *)((long)ti & ~((long)THREAD_SIZE-1)); } @@ -99,21 +98,21 @@ static inline struct thread_info *current_thread_info(void) #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling TIF_NEED_RESCHED */ -#define TIF_MEMDIE 4 +#define TIF_MEMDIE 4 #define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ -#define TIF_FREEZE 6 /* is freezing for suspend */ -#define TIF_IRQ_SYNC 7 /* sync pipeline stage */ -#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ +#define TIF_FREEZE 6 /* is freezing for suspend */ +#define TIF_IRQ_SYNC 7 /* sync pipeline stage */ +#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1< Date: Fri, 22 Jan 2010 10:07:30 +0000 Subject: Blackfin: add dma_disable_irq_nosync() API for irq handlers Some IRQ handlers need to disable a DMA channel without waiting. Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/dma.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h index bd2e62243abe..2c09b1d50ec9 100644 --- a/arch/blackfin/include/asm/dma.h +++ b/arch/blackfin/include/asm/dma.h @@ -262,6 +262,10 @@ static inline void dma_disable_irq(unsigned int channel) { disable_irq(dma_ch[channel].irq); } +static inline void dma_disable_irq_nosync(unsigned int channel) +{ + disable_irq_nosync(dma_ch[channel].irq); +} static inline void dma_enable_irq(unsigned int channel) { enable_irq(dma_ch[channel].irq); -- cgit v1.2.3 From 64b33a00dc39dceac04dbd7622ab14017e074712 Mon Sep 17 00:00:00 2001 From: Graf Yang Date: Mon, 25 Jan 2010 03:47:43 +0000 Subject: Blackfin: split watchdog definitions into a dedicated header file This allows things to be shared between the different watchdog sources. Signed-off-by: Graf Yang Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/bfin_watchdog.h | 30 ++++++++++++++++++++++++++++++ arch/blackfin/kernel/nmi.c | 16 +--------------- 2 files changed, 31 insertions(+), 15 deletions(-) create mode 100644 arch/blackfin/include/asm/bfin_watchdog.h diff --git a/arch/blackfin/include/asm/bfin_watchdog.h b/arch/blackfin/include/asm/bfin_watchdog.h new file mode 100644 index 000000000000..dce09829a095 --- /dev/null +++ b/arch/blackfin/include/asm/bfin_watchdog.h @@ -0,0 +1,30 @@ +/* + * bfin_watchdog.h - Blackfin watchdog definitions + * + * Copyright 2006-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. 
+ */ + +#ifndef _BFIN_WATCHDOG_H +#define _BFIN_WATCHDOG_H + +/* Bit in SWRST that indicates boot caused by watchdog */ +#define SWRST_RESET_WDOG 0x4000 + +/* Bit in WDOG_CTL that indicates watchdog has expired (WDR0) */ +#define WDOG_EXPIRED 0x8000 + +/* Masks for WDEV field in WDOG_CTL register */ +#define ICTL_RESET 0x0 +#define ICTL_NMI 0x2 +#define ICTL_GPI 0x4 +#define ICTL_NONE 0x6 +#define ICTL_MASK 0x6 + +/* Masks for WDEN field in WDOG_CTL register */ +#define WDEN_MASK 0x0FF0 +#define WDEN_ENABLE 0x0000 +#define WDEN_DISABLE 0x0AD0 + +#endif diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c index 19093c17632b..0b5f72f17fd0 100644 --- a/arch/blackfin/kernel/nmi.c +++ b/arch/blackfin/kernel/nmi.c @@ -20,21 +20,7 @@ #include #include #include - -/* Bit in WDOG_CTL that indicates watchdog has expired (WDR0) */ -#define WDOG_EXPIRED 0x8000 - -/* Masks for WDEV field in WDOG_CTL register */ -#define ICTL_RESET 0x0 -#define ICTL_NMI 0x2 -#define ICTL_GPI 0x4 -#define ICTL_NONE 0x6 -#define ICTL_MASK 0x6 - -/* Masks for WDEN field in WDOG_CTL register */ -#define WDEN_MASK 0x0FF0 -#define WDEN_ENABLE 0x0000 -#define WDEN_DISABLE 0x0AD0 +#include #define DRV_NAME "nmi-wdt" -- cgit v1.2.3 From 652afdc3403cbccb93c7e6db582a1204a9e5e90a Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Mon, 25 Jan 2010 22:12:32 +0000 Subject: Blackfin: move KGDB selection to the way other arches do it Signed-off-by: Mike Frysinger --- arch/blackfin/Kconfig | 1 + arch/blackfin/Kconfig.debug | 3 --- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 3123aa49ff79..ed339b83f5af 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -23,6 +23,7 @@ config RWSEM_XCHGADD_ALGORITHM config BLACKFIN def_bool y + select HAVE_ARCH_KGDB select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACE_MCOUNT_TEST diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug index 1460d7b5edc1..aec89a5280b2 100644 --- a/arch/blackfin/Kconfig.debug +++ b/arch/blackfin/Kconfig.debug @@ -18,9 +18,6 @@ config DEBUG_STACK_USAGE This option will slow down process creation somewhat. -config HAVE_ARCH_KGDB - def_bool y - config DEBUG_VERBOSE bool "Verbose fault messages" default y -- cgit v1.2.3 From 7136d9c5e874813ccbd1d438924c413b7305944c Mon Sep 17 00:00:00 2001 From: Yi Li Date: Tue, 26 Jan 2010 04:02:44 +0000 Subject: Blackfin: add CALLER_ADDR ftrace macros Since GCC doesn't support __builtin_frame_address(n) where n!=0, add our own function to walk the stack frame pointers. 
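
For illustration only, here is a minimal userspace sketch of the same frame-pointer walk; it is not part of this patch, and it assumes a build with -fno-omit-frame-pointer on an ABI that, like Blackfin, saves the caller's frame pointer at fp[0] and the return address at fp[1]:

/*
 * Hedged userspace sketch of a frame-pointer walk (not kernel code).
 * Assumes -fno-omit-frame-pointer and an ABI where fp[0] holds the
 * previous frame pointer and fp[1] the saved return address.
 */
#include <stdio.h>

static void *return_address(unsigned int level)
{
	unsigned long *fp = __builtin_frame_address(0);

	if (level == 0)
		return __builtin_return_address(0);

	/* follow the chain of saved frame pointers 'level' hops up */
	while (fp && level--)
		fp = (unsigned long *)fp[0];

	/* the saved return address sits next to the saved frame pointer */
	return fp ? (void *)fp[1] : NULL;
}

static void leaf(void)
{
	printf("level 0: %p\n", return_address(0));
	printf("level 1: %p\n", return_address(1));
}

int main(void)
{
	leaf();
	return 0;
}

The kernel version below additionally bounds the walk against the top of the stack and checks pointer alignment before each dereference, so a bogus frame pointer cannot fault inside the tracer.
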
Signed-off-by: Yi Li Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/ftrace.h | 53 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/arch/blackfin/include/asm/ftrace.h b/arch/blackfin/include/asm/ftrace.h index 90c9b400ba6d..4cfe2d9ba7e8 100644 --- a/arch/blackfin/include/asm/ftrace.h +++ b/arch/blackfin/include/asm/ftrace.h @@ -10,4 +10,57 @@ #define MCOUNT_INSN_SIZE 6 /* sizeof "[++sp] = rets; call __mcount;" */ +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_FRAME_POINTER +#include + +extern inline void *return_address(unsigned int level) +{ + unsigned long *endstack, *fp, *ret_addr; + unsigned int current_level = 0; + + if (level == 0) + return __builtin_return_address(0); + + fp = (unsigned long *)__builtin_frame_address(0); + endstack = (unsigned long *)PAGE_ALIGN((unsigned long)&level); + + while (((unsigned long)fp & 0x3) == 0 && fp && + (fp + 1) < endstack && current_level < level) { + fp = (unsigned long *)*fp; + current_level++; + } + + if (((unsigned long)fp & 0x3) == 0 && fp && + (fp + 1) < endstack) + ret_addr = (unsigned long *)*(fp + 1); + else + ret_addr = NULL; + + return ret_addr; +} + +#else + +extern inline void *return_address(unsigned int level) +{ + return NULL; +} + +#endif /* CONFIG_FRAME_POINTER */ + +#define HAVE_ARCH_CALLER_ADDR + +/* inline function or macro may lead to unexpected result */ +#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) +#define CALLER_ADDR1 ((unsigned long)return_address(1)) +#define CALLER_ADDR2 ((unsigned long)return_address(2)) +#define CALLER_ADDR3 ((unsigned long)return_address(3)) +#define CALLER_ADDR4 ((unsigned long)return_address(4)) +#define CALLER_ADDR5 ((unsigned long)return_address(5)) +#define CALLER_ADDR6 ((unsigned long)return_address(6)) + +#endif /* __ASSEMBLY__ */ + #endif -- cgit v1.2.3 From 3b82790c12f8122d3df07cc387d2d74355e54c75 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Wed, 27 Jan 2010 09:01:36 +0000 Subject: Blackfin: rename AD1938 to AD193X in board files The ASoC codec driver was generalized and renamed, so update the board resources accordingly. 
Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 80b3b577f74e..2e354b5c18ee 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -528,8 +528,8 @@ static struct bfin5xx_spi_chip ad1836_spi_chip_info = { }; #endif -#if defined(CONFIG_SND_BF5XX_SOC_AD1938) \ - || defined(CONFIG_SND_BF5XX_SOC_AD1938_MODULE) +#if defined(CONFIG_SND_BF5XX_SOC_AD193X) \ + || defined(CONFIG_SND_BF5XX_SOC_AD193X_MODULE) static struct bfin5xx_spi_chip ad1938_spi_chip_info = { .enable_dma = 0, .bits_per_word = 8, @@ -915,9 +915,9 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif -#if defined(CONFIG_SND_BF5XX_SOC_AD1938) || defined(CONFIG_SND_BF5XX_SOC_AD1938_MODULE) +#if defined(CONFIG_SND_BF5XX_SOC_AD193X) || defined(CONFIG_SND_BF5XX_SOC_AD193X_MODULE) { - .modalias = "ad1938", + .modalias = "ad193x", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 5, -- cgit v1.2.3 From 6c2b7072a7035837998da38809f98e4182e4c41c Mon Sep 17 00:00:00 2001 From: Graf Yang Date: Wed, 27 Jan 2010 11:16:32 +0000 Subject: Blackfin: add support for cpufreq on SMP systems Signed-off-by: Graf Yang Signed-off-by: Mike Frysinger --- arch/blackfin/Kconfig | 1 - arch/blackfin/include/asm/time.h | 7 +- arch/blackfin/kernel/time-ts.c | 4 + arch/blackfin/mach-common/cpufreq.c | 155 +++++++++++++++++++++--------------- 4 files changed, 101 insertions(+), 66 deletions(-) diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index ed339b83f5af..970df5b5c525 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -1237,7 +1237,6 @@ config PM_BFIN_WAKE_GP endmenu menu "CPU Frequency scaling" - depends on !SMP source "drivers/cpufreq/Kconfig" diff --git a/arch/blackfin/include/asm/time.h b/arch/blackfin/include/asm/time.h index 767b938ccf8c..9ca7db844d10 100644 --- a/arch/blackfin/include/asm/time.h +++ b/arch/blackfin/include/asm/time.h @@ -23,9 +23,7 @@ */ #ifndef CONFIG_CPU_FREQ -#define TIME_SCALE 1 -#define __bfin_cycles_off (0) -#define __bfin_cycles_mod (0) +# define TIME_SCALE 1 #else /* * Blackfin CPU frequency scaling supports max Core Clock 1, 1/2 and 1/4 . @@ -33,8 +31,11 @@ * adjust the Core Timer Presale Register. This way we don't lose time. 
*/ #define TIME_SCALE 4 + +# ifdef CONFIG_CYCLES_CLOCKSOURCE extern unsigned long long __bfin_cycles_off; extern unsigned int __bfin_cycles_mod; +# endif #endif #if defined(CONFIG_TICKSOURCE_CORETMR) diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c index 41a907596c70..cb7a01d4f009 100644 --- a/arch/blackfin/kernel/time-ts.c +++ b/arch/blackfin/kernel/time-ts.c @@ -51,7 +51,11 @@ static notrace cycle_t bfin_read_cycles(struct clocksource *cs) { +#ifdef CONFIG_CPU_FREQ return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); +#else + return get_cycles(); +#endif } static struct clocksource bfin_cs_cycles = { diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c index 777582897253..5d7f8ab5509a 100644 --- a/arch/blackfin/mach-common/cpufreq.c +++ b/arch/blackfin/mach-common/cpufreq.c @@ -15,6 +15,8 @@ #include #include +#define CPUFREQ_CPU 0 + /* this is the table of CCLK frequencies, in Hz */ /* .index is the entry in the auxillary dpm_state_table[] */ static struct cpufreq_frequency_table bfin_freq_table[] = { @@ -41,64 +43,114 @@ static struct bfin_dpm_state { unsigned int tscale; /* change the divider on the core timer interrupt */ } dpm_state_table[3]; +#if defined(CONFIG_CYCLES_CLOCKSOURCE) /* - normalized to maximum frequncy offset for CYCLES, - used in time-ts cycles clock source, but could be used - somewhere also. + * normalized to maximum frequncy offset for CYCLES, + * used in time-ts cycles clock source, but could be used + * somewhere also. */ unsigned long long __bfin_cycles_off; unsigned int __bfin_cycles_mod; +#endif /**************************************************************************/ +static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk) +{ -static unsigned int bfin_getfreq_khz(unsigned int cpu) + unsigned long csel, min_cclk; + int index; + + /* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */ +#if ANOMALY_05000273 || ANOMALY_05000274 || \ + (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE)) + min_cclk = sclk * 2; +#else + min_cclk = sclk; +#endif + csel = ((bfin_read_PLL_DIV() & CSEL) >> 4); + + for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) { + bfin_freq_table[index].frequency = cclk >> index; + dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */ + dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1; + + pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n", + bfin_freq_table[index].frequency, + dpm_state_table[index].csel, + dpm_state_table[index].tscale); + } + return; +} + +static void bfin_adjust_core_timer(void *info) { - /* The driver only support single cpu */ - if (cpu != 0) - return -1; + unsigned int tscale; + unsigned int index = *(unsigned int *)info; + + /* we have to adjust the core timer, because it is using cclk */ + tscale = dpm_state_table[index].tscale; + bfin_write_TSCALE(tscale); + return; +} +static unsigned int bfin_getfreq_khz(unsigned int cpu) +{ + /* Both CoreA/B have the same core clock */ return get_cclk() / 1000; } -static int bfin_target(struct cpufreq_policy *policy, +static int bfin_target(struct cpufreq_policy *poli, unsigned int target_freq, unsigned int relation) { - unsigned int index, plldiv, tscale; + unsigned int index, plldiv, cpu; unsigned long flags, cclk_hz; struct cpufreq_freqs freqs; +#if defined(CONFIG_CYCLES_CLOCKSOURCE) cycles_t cycles; +#endif - if (cpufreq_frequency_table_target(policy, bfin_freq_table, - target_freq, relation, 
&index)) - return -EINVAL; - - cclk_hz = bfin_freq_table[index].frequency; - - freqs.old = bfin_getfreq_khz(0); - freqs.new = cclk_hz; - freqs.cpu = 0; - - pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", - cclk_hz, target_freq, freqs.old); - - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - local_irq_save_hw(flags); - plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel; - tscale = dpm_state_table[index].tscale; - bfin_write_PLL_DIV(plldiv); - /* we have to adjust the core timer, because it is using cclk */ - bfin_write_TSCALE(tscale); - cycles = get_cycles(); - SSYNC(); - cycles += 10; /* ~10 cycles we lose after get_cycles() */ - __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index); - __bfin_cycles_mod = index; - local_irq_restore_hw(flags); - /* TODO: just test case for cycles clock source, remove later */ - pr_debug("cpufreq: done\n"); - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + for_each_online_cpu(cpu) { + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + + if (!policy) + continue; + + if (cpufreq_frequency_table_target(policy, bfin_freq_table, + target_freq, relation, &index)) + return -EINVAL; + + cclk_hz = bfin_freq_table[index].frequency; + + freqs.old = bfin_getfreq_khz(0); + freqs.new = cclk_hz; + freqs.cpu = cpu; + + pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n", + cclk_hz, target_freq, freqs.old); + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + if (cpu == CPUFREQ_CPU) { + local_irq_save_hw(flags); + plldiv = (bfin_read_PLL_DIV() & SSEL) | + dpm_state_table[index].csel; + bfin_write_PLL_DIV(plldiv); + on_each_cpu(bfin_adjust_core_timer, &index, 1); +#if defined(CONFIG_CYCLES_CLOCKSOURCE) + cycles = get_cycles(); + SSYNC(); + cycles += 10; /* ~10 cycles we lose after get_cycles() */ + __bfin_cycles_off += + (cycles << __bfin_cycles_mod) - (cycles << index); + __bfin_cycles_mod = index; +#endif + local_irq_restore_hw(flags); + } + /* TODO: just test case for cycles clock source, remove later */ + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + } + pr_debug("cpufreq: done\n"); return 0; } @@ -110,37 +162,16 @@ static int bfin_verify_speed(struct cpufreq_policy *policy) static int __init __bfin_cpu_init(struct cpufreq_policy *policy) { - unsigned long cclk, sclk, csel, min_cclk; - int index; - - if (policy->cpu != 0) - return -EINVAL; + unsigned long cclk, sclk; cclk = get_cclk() / 1000; sclk = get_sclk() / 1000; -#if ANOMALY_05000273 || ANOMALY_05000274 || \ - (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE)) - min_cclk = sclk * 2; -#else - min_cclk = sclk; -#endif - csel = ((bfin_read_PLL_DIV() & CSEL) >> 4); - - for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) { - bfin_freq_table[index].frequency = cclk >> index; - dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */ - dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1; - - pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n", - bfin_freq_table[index].frequency, - dpm_state_table[index].csel, - dpm_state_table[index].tscale); - } + if (policy->cpu == CPUFREQ_CPU) + bfin_init_tables(cclk, sclk); policy->cpuinfo.transition_latency = 50000; /* 50us assumed */ - /*Now ,only support one cpu */ policy->cur = cclk; cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu); return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table); -- cgit v1.2.3 From 718340f62900ed44046d2b0f74d0dec7cf844194 Mon Sep 17 00:00:00 2001 From: 
Graf Yang Date: Mon, 1 Feb 2010 06:07:50 +0000 Subject: Blackfin: rewrite resync_core_{i,d}cache() SMP logic to avoid per_cpu data These functions are implicitly called by core functions like cpu_relax(), and since those functions may be called early on before common code has initialized the per-cpu data area, we need to tweak the stats gathering. Now the statistics are maintained in common bss which makes these funcs safe to use as soon as the C runtime env is set up. Signed-off-by: Graf Yang Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/cpu.h | 2 -- arch/blackfin/include/asm/smp.h | 7 +++++++ arch/blackfin/kernel/setup.c | 4 ++-- arch/blackfin/mach-common/smp.c | 6 ++++-- 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/arch/blackfin/include/asm/cpu.h b/arch/blackfin/include/asm/cpu.h index b191dc662bd8..16883e582e3c 100644 --- a/arch/blackfin/include/asm/cpu.h +++ b/arch/blackfin/include/asm/cpu.h @@ -17,8 +17,6 @@ struct blackfin_cpudata { struct task_struct *idle; unsigned int imemctl; unsigned int dmemctl; - unsigned long dcache_invld_count; - unsigned long icache_invld_count; }; DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data); diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h index 7f26de09ca9c..f5b537967116 100644 --- a/arch/blackfin/include/asm/smp.h +++ b/arch/blackfin/include/asm/smp.h @@ -24,6 +24,13 @@ struct corelock_slot { }; extern struct corelock_slot corelock; +#ifdef __ARCH_SYNC_CORE_ICACHE +extern unsigned long icache_invld_count[NR_CPUS]; +#endif +#ifdef __ARCH_SYNC_CORE_DCACHE +extern unsigned long dcache_invld_count[NR_CPUS]; +#endif + void smp_icache_flush_range_others(unsigned long start, unsigned long end); #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c index b54ba45db5f1..8e2efceb364b 100644 --- a/arch/blackfin/kernel/setup.c +++ b/arch/blackfin/kernel/setup.c @@ -1239,10 +1239,10 @@ static int show_cpuinfo(struct seq_file *m, void *v) dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS, BFIN_DLINES); #ifdef __ARCH_SYNC_CORE_DCACHE - seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count); + seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", dcache_invld_count[cpu_num]); #endif #ifdef __ARCH_SYNC_CORE_ICACHE - seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count); + seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", icache_invld_count[cpu_num]); #endif if (cpu_num != num_possible_cpus() - 1) diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index efc47ffd066d..7803f22d2ca7 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c @@ -474,24 +474,26 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end) EXPORT_SYMBOL_GPL(smp_icache_flush_range_others); #ifdef __ARCH_SYNC_CORE_ICACHE +unsigned long icache_invld_count[NR_CPUS]; void resync_core_icache(void) { unsigned int cpu = get_cpu(); blackfin_invalidate_entire_icache(); - ++per_cpu(cpu_data, cpu).icache_invld_count; + icache_invld_count[cpu]++; put_cpu(); } EXPORT_SYMBOL(resync_core_icache); #endif #ifdef __ARCH_SYNC_CORE_DCACHE +unsigned long dcache_invld_count[NR_CPUS]; unsigned long barrier_mask __attribute__ ((__section__(".l2.bss"))); void resync_core_dcache(void) { unsigned int cpu = get_cpu(); blackfin_invalidate_entire_dcache(); - ++per_cpu(cpu_data, cpu).dcache_invld_count; + dcache_invld_count[cpu]++; put_cpu(); } EXPORT_SYMBOL(resync_core_dcache); -- cgit v1.2.3 From
ad6720c0b52ae1cde0bc567e57a71acde1a292aa Mon Sep 17 00:00:00 2001 From: Barry Song Date: Wed, 3 Feb 2010 09:15:31 +0000 Subject: Blackfin: bf537-stamp: add example AD715{0,2}/AD774{5,6,7} IIO resources Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 2e354b5c18ee..9bc7bd3f35f8 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -1608,6 +1608,26 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { .platform_data = (void *)&ad7142_i2c_platform_data, }, #endif + +#if defined(CONFIG_AD7150) || defined(CONFIG_AD7150_MODULE) + { + I2C_BOARD_INFO("ad7150", 0x48), + .irq = IRQ_PG5, /* fixme: use real interrupt number */ + }, +#endif + +#if defined(CONFIG_AD7152) || defined(CONFIG_AD7152_MODULE) + { + I2C_BOARD_INFO("ad7152", 0x48), + }, +#endif + +#if defined(CONFIG_AD774X) || defined(CONFIG_AD774X_MODULE) + { + I2C_BOARD_INFO("ad774x", 0x48), + }, +#endif + #if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) { I2C_BOARD_INFO("pcf8574_lcd", 0x22), -- cgit v1.2.3 From f5b99627a3065858ad5c678703ed7af5363dca39 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Feb 2010 09:15:57 +0000 Subject: Blackfin: use generic ptrace_resume code Use the generic ptrace_resume code for PTRACE_SYSCALL, PTRACE_CONT, PTRACE_KILL and PTRACE_SINGLESTEP. This implies defining arch_has_single_step in and implementing the user_enable_single_step and user_disable_single_step functions, which also causes the breakpoint information to be cleared on fork, which could be considered a bug fix. Also the TIF_SYSCALL_TRACE thread flag is now cleared on PTRACE_KILL which it previously wasn't which is consistent with all architectures using the modern ptrace code. Signed-off-by: Christoph Hellwig Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/ptrace.c | 44 ------------------------------------------- 1 file changed, 44 deletions(-) diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c index 65567dc4b9f5..895302df21c1 100644 --- a/arch/blackfin/kernel/ptrace.c +++ b/arch/blackfin/kernel/ptrace.c @@ -354,50 +354,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) ret = put_reg(child, addr, data); break; - case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ - case PTRACE_CONT: /* restart after signal. */ - pr_debug("ptrace: syscall/cont\n"); - - ret = -EIO; - if (!valid_signal(data)) - break; - if (request == PTRACE_SYSCALL) - set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - else - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - child->exit_code = data; - ptrace_disable(child); - pr_debug("ptrace: before wake_up_process\n"); - wake_up_process(child); - ret = 0; - break; - - /* - * make the child exit. Best I can do is send it a sigkill. - * perhaps it should be put in the status that it wants to - * exit. - */ - case PTRACE_KILL: - ret = 0; - if (child->exit_state == EXIT_ZOMBIE) /* already dead */ - break; - child->exit_code = SIGKILL; - ptrace_disable(child); - wake_up_process(child); - break; - - case PTRACE_SINGLESTEP: /* set the trap flag. 
*/ - pr_debug("ptrace: single step\n"); - ret = -EIO; - if (!valid_signal(data)) - break; - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - ptrace_enable(child); - child->exit_code = data; - wake_up_process(child); - ret = 0; - break; - case PTRACE_GETREGS: /* Get all gp regs from the child. */ ret = ptrace_getregs(child, datap); -- cgit v1.2.3 From 5f09c77d2ad69397498b6555f0d3cd697304253c Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Sun, 14 Feb 2010 22:49:59 +0000 Subject: Blackfin: simplify SYSCFG code a bit and ignore attempts to change it We don't want to let user space modify the SYSCFG register arbitrarily as the settings are system wide (SNEN/CNEN) and can cause misbehavior. The only other bit here (SSSTEP) has proper controls via PTRACE_SINGLESTEP. Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/ptrace.c | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c index 895302df21c1..14118e40f18f 100644 --- a/arch/blackfin/kernel/ptrace.c +++ b/arch/blackfin/kernel/ptrace.c @@ -31,12 +31,6 @@ * in exit.c or in signal.c. */ -/* determines which bits in the SYSCFG reg the user has access to. */ -/* 1 = access 0 = no access */ -#define SYSCFG_MASK 0x0007 /* SYSCFG reg */ -/* sets the trace bits. */ -#define TRACE_BITS 0x0001 - /* Find the stack offset for a register, relative to thread.esp0. */ #define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg) @@ -162,9 +156,8 @@ static inline int is_user_addr_valid(struct task_struct *child, void ptrace_enable(struct task_struct *child) { - unsigned long tmp; - tmp = get_reg(child, PT_SYSCFG) | (TRACE_BITS); - put_reg(child, PT_SYSCFG, tmp); + struct pt_regs *regs = task_pt_regs(child); + regs->syscfg |= SYSCFG_SSSTEP; } /* @@ -174,10 +167,8 @@ void ptrace_enable(struct task_struct *child) */ void ptrace_disable(struct task_struct *child) { - unsigned long tmp; - /* make sure the single step bit is not set. 
*/ - tmp = get_reg(child, PT_SYSCFG) & ~TRACE_BITS; - put_reg(child, PT_SYSCFG, tmp); + struct pt_regs *regs = task_pt_regs(child); + regs->syscfg &= ~SYSCFG_SSSTEP; } long arch_ptrace(struct task_struct *child, long request, long addr, long data) @@ -343,14 +334,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) break; } - if (addr >= (sizeof(struct pt_regs))) { + /* Ignore writes to SYSCFG and other pseudo regs */ + if (addr >= PT_SYSCFG) { ret = 0; break; } - if (addr == PT_SYSCFG) { - data &= SYSCFG_MASK; - data |= get_reg(child, PT_SYSCFG); - } ret = put_reg(child, addr, data); break; -- cgit v1.2.3 From f2ce48024a9a6d3e92a023ded0f7b1e99da1cd16 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Sun, 14 Feb 2010 22:56:24 +0000 Subject: Blackfin: simplify PTRACE_{PEEK,POKE}USR in preparation for regset support Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/ptrace.h | 2 + arch/blackfin/kernel/ptrace.c | 118 ++++++++++++++++--------------- 2 files changed, 53 insertions(+), 67 deletions(-) diff --git a/arch/blackfin/include/asm/ptrace.h b/arch/blackfin/include/asm/ptrace.h index b33a4488f498..c1aebdb981c7 100644 --- a/arch/blackfin/include/asm/ptrace.h +++ b/arch/blackfin/include/asm/ptrace.h @@ -173,4 +173,6 @@ extern void show_regs(struct pt_regs *); #define PT_FDPIC_EXEC 232 #define PT_FDPIC_INTERP 236 +#define PT_LAST_PSEUDO PT_FDPIC_INTERP + #endif /* _BFIN_PTRACE_H */ diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c index 14118e40f18f..9536c1e2d1c7 100644 --- a/arch/blackfin/kernel/ptrace.c +++ b/arch/blackfin/kernel/ptrace.c @@ -25,7 +25,6 @@ #include #include -#define TEXT_OFFSET 0 /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. @@ -43,7 +42,7 @@ * kernel stack will not be empty on entry to the kernel, so * ptracing these tasks will fail. */ -static inline struct pt_regs *get_user_regs(struct task_struct *task) +static inline struct pt_regs *task_pt_regs(struct task_struct *task) { return (struct pt_regs *) ((unsigned long)task_stack_page(task) + @@ -56,7 +55,7 @@ static inline struct pt_regs *get_user_regs(struct task_struct *task) static inline int ptrace_getregs(struct task_struct *tsk, void __user *uregs) { struct pt_regs regs; - memcpy(&regs, get_user_regs(tsk), sizeof(regs)); + memcpy(&regs, task_pt_regs(tsk), sizeof(regs)); regs.usp = tsk->thread.usp; return copy_to_user(uregs, &regs, sizeof(struct pt_regs)) ? -EFAULT : 0; } @@ -69,40 +68,49 @@ static inline int ptrace_getregs(struct task_struct *tsk, void __user *uregs) /* * Get contents of register REGNO in task TASK.
*/ -static inline long get_reg(struct task_struct *task, int regno) +static inline long +get_reg(struct task_struct *task, long regno, unsigned long __user *datap) { - unsigned char *reg_ptr; + long tmp; + struct pt_regs *regs = task_pt_regs(task); - struct pt_regs *regs = - (struct pt_regs *)((unsigned long)task_stack_page(task) + - (THREAD_SIZE - sizeof(struct pt_regs))); - reg_ptr = (char *)regs; + if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0) + return -EIO; switch (regno) { + case PT_TEXT_ADDR: + tmp = task->mm->start_code; + break; + case PT_TEXT_END_ADDR: + tmp = task->mm->end_code; + break; + case PT_DATA_ADDR: + tmp = task->mm->start_data; + break; case PT_USP: - return task->thread.usp; + tmp = task->thread.usp; + break; default: - if (regno <= 216) - return *(long *)(reg_ptr + regno); + if (regno < sizeof(*regs)) { + void *reg_ptr = regs; + tmp = *(long *)(reg_ptr + regno); + } else + return -EIO; } - /* slight mystery ... never seems to come here but kernel misbehaves without this code! */ - printk(KERN_WARNING "Request to get for unknown register %d\n", regno); - return 0; + return put_user(tmp, datap); } /* * Write contents of register REGNO in task TASK. */ static inline int -put_reg(struct task_struct *task, int regno, unsigned long data) +put_reg(struct task_struct *task, long regno, unsigned long data) { - char *reg_ptr; + struct pt_regs *regs = task_pt_regs(task); - struct pt_regs *regs = - (struct pt_regs *)((unsigned long)task_stack_page(task) + - (THREAD_SIZE - sizeof(struct pt_regs))); - reg_ptr = (char *)regs; + if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0) + return -EIO; switch (regno) { case PT_PC: @@ -119,10 +127,18 @@ put_reg(struct task_struct *task, int regno, unsigned long data) regs->usp = data; task->thread.usp = data; break; + case PT_SYSCFG: /* don't let userspace screw with this */ + if ((data & ~1) != 0x6) + pr_warning("ptrace: ignore syscfg write of %#lx\n", data); + break; /* regs->syscfg = data; break; */ default: - if (regno <= 216) - *(long *)(reg_ptr + regno) = data; + if (regno < sizeof(*regs)) { + void *reg_offset = regs; + *(long *)(reg_offset + regno) = data; + } + /* Ignore writes to pseudo registers */ } + return 0; } @@ -231,40 +247,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) break; } - /* read the word at location addr in the USER area. 
*/ - case PTRACE_PEEKUSR: - { - unsigned long tmp; - ret = -EIO; - tmp = 0; - if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) { - printk(KERN_WARNING "ptrace error : PEEKUSR : temporarily returning " - "0 - %x sizeof(pt_regs) is %lx\n", - (int)addr, sizeof(struct pt_regs)); - break; - } - if (addr == sizeof(struct pt_regs)) { - /* PT_TEXT_ADDR */ - tmp = child->mm->start_code + TEXT_OFFSET; - } else if (addr == (sizeof(struct pt_regs) + 4)) { - /* PT_TEXT_END_ADDR */ - tmp = child->mm->end_code; - } else if (addr == (sizeof(struct pt_regs) + 8)) { - /* PT_DATA_ADDR */ - tmp = child->mm->start_data; -#ifdef CONFIG_BINFMT_ELF_FDPIC - } else if (addr == (sizeof(struct pt_regs) + 12)) { - goto case_PTRACE_GETFDPIC_EXEC; - } else if (addr == (sizeof(struct pt_regs) + 16)) { - goto case_PTRACE_GETFDPIC_INTERP; -#endif - } else { - tmp = get_reg(child, addr); - } - ret = put_user(tmp, datap); - break; - } - #ifdef CONFIG_BINFMT_ELF_FDPIC case PTRACE_GETFDPIC: { unsigned long tmp = 0; @@ -327,19 +309,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) break; } - case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ - ret = -EIO; - if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) { - printk(KERN_WARNING "ptrace error : POKEUSR: temporarily returning 0\n"); - break; + case PTRACE_PEEKUSR: + switch (addr) { +#ifdef CONFIG_BINFMT_ELF_FDPIC /* backwards compat */ + case PT_FDPIC_EXEC: goto case_PTRACE_GETFDPIC_EXEC; + case PT_FDPIC_INTERP: goto case_PTRACE_GETFDPIC_INTERP; +#endif + default: + ret = get_reg(child, addr, datap); } + pr_debug("ptrace: PEEKUSR reg %li with %#lx = %i\n", addr, data, ret); + break; - /* Ignore writes to SYSCFG and other pseudo regs */ - if (addr >= PT_SYSCFG) { - ret = 0; - break; - } + case PTRACE_POKEUSR: ret = put_reg(child, addr, data); + pr_debug("ptrace: POKEUSR reg %li with %li = %i\n", addr, data, ret); break; case PTRACE_GETREGS: -- cgit v1.2.3 From e50e2f25c5b90abd00a1e5871c45094cf5207afc Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Sun, 14 Feb 2010 22:58:02 +0000 Subject: Blackfin: initial regset support We don't support core dumps (yet?), but this should make things easier. Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/elf.h | 8 ++- arch/blackfin/kernel/process.c | 7 --- arch/blackfin/kernel/ptrace.c | 113 ++++++++++++++++++++++++++++++++-------- 3 files changed, 97 insertions(+), 31 deletions(-) diff --git a/arch/blackfin/include/asm/elf.h b/arch/blackfin/include/asm/elf.h index 5b50f0ecacf8..117713adea7f 100644 --- a/arch/blackfin/include/asm/elf.h +++ b/arch/blackfin/include/asm/elf.h @@ -22,12 +22,15 @@ #define EF_BFIN_CODE_IN_L2 0x00000040 /* --code-in-l2 */ #define EF_BFIN_DATA_IN_L2 0x00000080 /* --data-in-l2 */ +#if 1 /* core dumps not supported, but linux/elfcore.h needs these */ typedef unsigned long elf_greg_t; -#define ELF_NGREG 40 /* (sizeof(struct user_regs_struct) / sizeof(elf_greg_t)) */ +#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t)) typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct { } elf_fpregset_t; +#endif + /* * This is used to ensure we don't load something for the wrong architecture. 
*/ @@ -55,6 +58,9 @@ do { \ _regs->p2 = _dynamic_addr; \ } while(0) +#if 0 +#define CORE_DUMP_USE_REGSET +#endif #define ELF_FDPIC_CORE_EFLAGS EF_BFIN_FDPIC #define ELF_EXEC_PAGESIZE 4096 diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index b56b0e485e0b..29705cec91de 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c @@ -98,13 +98,6 @@ void cpu_idle(void) } } -/* Fill in the fpu structure for a core dump. */ - -int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpregs) -{ - return 1; -} - /* * This gets run with P1 containing the * function to call, and R1 containing diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c index 9536c1e2d1c7..92b4ca0b5af6 100644 --- a/arch/blackfin/kernel/ptrace.c +++ b/arch/blackfin/kernel/ptrace.c @@ -9,9 +9,11 @@ #include #include #include +#include #include #include #include +#include #include #include @@ -49,22 +51,6 @@ static inline struct pt_regs *task_pt_regs(struct task_struct *task) (THREAD_SIZE - sizeof(struct pt_regs))); } -/* - * Get all user integer registers. - */ -static inline int ptrace_getregs(struct task_struct *tsk, void __user *uregs) -{ - struct pt_regs regs; - memcpy(®s, task_pt_regs(tsk), sizeof(regs)); - regs.usp = tsk->thread.usp; - return copy_to_user(uregs, ®s, sizeof(struct pt_regs)) ? -EFAULT : 0; -} - -/* Mapping from PT_xxx to the stack offset at which the register is - * saved. Notice that usp has no stack-slot and needs to be treated - * specially (see get_reg/put_reg below). - */ - /* * Get contents of register REGNO in task TASK. */ @@ -170,6 +156,84 @@ static inline int is_user_addr_valid(struct task_struct *child, return -EIO; } +/* + * retrieve the contents of Blackfin userspace general registers + */ +static int genregs_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + int ret; + + /* This sucks ... */ + regs->usp = target->thread.usp; + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, + regs, 0, sizeof(*regs)); + if (ret < 0) + return ret; + + return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, + sizeof(*regs), -1); +} + +/* + * update the contents of the Blackfin userspace general registers + */ +static int genregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + int ret; + + /* Don't let people set SYSCFG (it's at the end of pt_regs) */ + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + regs, 0, PT_SYSCFG); + if (ret < 0) + return ret; + + /* This sucks ... 
*/ + target->thread.usp = regs->usp; + /* regs->retx = regs->pc; */ + + return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + PT_SYSCFG, -1); +} + +/* + * Define the register sets available on the Blackfin under Linux + */ +enum bfin_regset { + REGSET_GENERAL, +}; + +static const struct user_regset bfin_regsets[] = { + [REGSET_GENERAL] = { + .core_note_type = NT_PRSTATUS, + .n = sizeof(struct pt_regs) / sizeof(long), + .size = sizeof(long), + .align = sizeof(long), + .get = genregs_get, + .set = genregs_set, + }, +}; + +static const struct user_regset_view user_bfin_native_view = { + .name = "Blackfin", + .e_machine = EM_BLACKFIN, + .regsets = bfin_regsets, + .n = ARRAY_SIZE(bfin_regsets), +}; + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return &user_bfin_native_view; +} + void ptrace_enable(struct task_struct *child) { struct pt_regs *regs = task_pt_regs(child); @@ -327,15 +391,18 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) break; case PTRACE_GETREGS: - /* Get all gp regs from the child. */ - ret = ptrace_getregs(child, datap); - break; + pr_debug("ptrace: PTRACE_GETREGS\n"); + return copy_regset_to_user(child, &user_bfin_native_view, + REGSET_GENERAL, + 0, sizeof(struct pt_regs), + (void __user *)data); case PTRACE_SETREGS: - printk(KERN_WARNING "ptrace: SETREGS: **** NOT IMPLEMENTED ***\n"); - /* Set all gp regs in the child. */ - ret = 0; - break; + pr_debug("ptrace: PTRACE_SETREGS\n"); + return copy_regset_from_user(child, &user_bfin_native_view, + REGSET_GENERAL, + 0, sizeof(struct pt_regs), + (const void __user *)data); default: ret = ptrace_request(child, request, addr, data); -- cgit v1.2.3 From e8f263dfd32a784a816fe68956e564f8ede4a9fc Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Tue, 26 Jan 2010 07:33:53 +0000 Subject: Blackfin: initial tracehook support Signed-off-by: Mike Frysinger --- arch/blackfin/Kconfig | 1 + arch/blackfin/include/asm/ptrace.h | 23 +++++++++ arch/blackfin/include/asm/syscall.h | 96 +++++++++++++++++++++++++++++++++++++ arch/blackfin/kernel/ptrace.c | 66 +++++++------------------ arch/blackfin/kernel/signal.c | 14 ++---- arch/blackfin/mach-common/entry.S | 6 ++- 6 files changed, 144 insertions(+), 62 deletions(-) create mode 100644 arch/blackfin/include/asm/syscall.h diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 970df5b5c525..c078849df7f9 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -24,6 +24,7 @@ config RWSEM_XCHGADD_ALGORITHM config BLACKFIN def_bool y select HAVE_ARCH_KGDB + select HAVE_ARCH_TRACEHOOK select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACE_MCOUNT_TEST diff --git a/arch/blackfin/include/asm/ptrace.h b/arch/blackfin/include/asm/ptrace.h index c1aebdb981c7..aaa1c6c2bc19 100644 --- a/arch/blackfin/include/asm/ptrace.h +++ b/arch/blackfin/include/asm/ptrace.h @@ -24,6 +24,8 @@ #ifndef __ASSEMBLY__ +struct task_struct; + /* this struct defines the way the registers are stored on the stack during a system call. */ @@ -101,9 +103,30 @@ struct pt_regs { master interrupt enable. 
*/ #define user_mode(regs) (!(((regs)->ipend & ~0x10) & (((regs)->ipend & ~0x10) - 1))) #define instruction_pointer(regs) ((regs)->pc) +#define user_stack_pointer(regs) ((regs)->usp) #define profile_pc(regs) instruction_pointer(regs) extern void show_regs(struct pt_regs *); +#define arch_has_single_step() (1) +extern void user_enable_single_step(struct task_struct *child); +extern void user_disable_single_step(struct task_struct *child); +/* common code demands this function */ +#define ptrace_disable(child) user_disable_single_step(child) + +/* + * Get the address of the live pt_regs for the specified task. + * These are saved onto the top kernel stack when the process + * is not running. + * + * Note: if a user thread is execve'd from kernel space, the + * kernel stack will not be empty on entry to the kernel, so + * ptracing these tasks will fail. + */ +#define task_pt_regs(task) \ + (struct pt_regs *) \ + ((unsigned long)task_stack_page(task) + \ + (THREAD_SIZE - sizeof(struct pt_regs))) + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ diff --git a/arch/blackfin/include/asm/syscall.h b/arch/blackfin/include/asm/syscall.h new file mode 100644 index 000000000000..4921a4815cce --- /dev/null +++ b/arch/blackfin/include/asm/syscall.h @@ -0,0 +1,96 @@ +/* + * Magic syscall break down functions + * + * Copyright 2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __ASM_BLACKFIN_SYSCALL_H__ +#define __ASM_BLACKFIN_SYSCALL_H__ + +/* + * Blackfin syscalls are simple: + * enter: + * p0: syscall number + * r{0,1,2,3,4,5}: syscall args 0,1,2,3,4,5 + * exit: + * r0: return/error value + */ + +#include +#include +#include + +static inline long +syscall_get_nr(struct task_struct *task, struct pt_regs *regs) +{ + return regs->p0; +} + +static inline void +syscall_rollback(struct task_struct *task, struct pt_regs *regs) +{ + regs->p0 = regs->orig_p0; +} + +static inline long +syscall_get_error(struct task_struct *task, struct pt_regs *regs) +{ + return IS_ERR_VALUE(regs->r0) ? regs->r0 : 0; +} + +static inline long +syscall_get_return_value(struct task_struct *task, struct pt_regs *regs) +{ + return regs->r0; +} + +static inline void +syscall_set_return_value(struct task_struct *task, struct pt_regs *regs, + int error, long val) +{ + regs->r0 = error ? -error : val; +} + +/** + * syscall_get_arguments() + * @task: unused + * @regs: the register layout to extract syscall arguments from + * @i: first syscall argument to extract + * @n: number of syscall arguments to extract + * @args: array to return the syscall arguments in + * + * args[0] gets i'th argument, args[n - 1] gets the i+n-1'th argument + */ +static inline void +syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, + unsigned int i, unsigned int n, unsigned long *args) +{ + /* + * Assume the ptrace layout doesn't change -- r5 is first in memory, + * then r4, ..., then r0. So we simply reverse the ptrace register + * array in memory to store into the args array. 
+ */ + long *aregs = &regs->r0 - i; + + BUG_ON(i > 5 || i + n > 6); + + while (n--) + *args++ = *aregs--; +} + +/* See syscall_get_arguments() comments */ +static inline void +syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, + unsigned int i, unsigned int n, const unsigned long *args) +{ + long *aregs = &regs->r0 - i; + + BUG_ON(i > 5 || i + n > 6); + + while (n--) + *aregs-- = *args++; +} + +#endif diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c index 92b4ca0b5af6..0618b8287e34 100644 --- a/arch/blackfin/kernel/ptrace.c +++ b/arch/blackfin/kernel/ptrace.c @@ -1,6 +1,6 @@ /* * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds - * these modifications are Copyright 2004-2009 Analog Devices Inc. + * these modifications are Copyright 2004-2010 Analog Devices Inc. * * Licensed under the GPL-2 */ @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -32,25 +33,6 @@ * in exit.c or in signal.c. */ -/* Find the stack offset for a register, relative to thread.esp0. */ -#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg) - -/* - * Get the address of the live pt_regs for the specified task. - * These are saved onto the top kernel stack when the process - * is not running. - * - * Note: if a user thread is execve'd from kernel space, the - * kernel stack will not be empty on entry to the kernel, so - * ptracing these tasks will fail. - */ -static inline struct pt_regs *task_pt_regs(struct task_struct *task) -{ - return (struct pt_regs *) - ((unsigned long)task_stack_page(task) + - (THREAD_SIZE - sizeof(struct pt_regs))); -} - /* * Get contents of register REGNO in task TASK. */ @@ -234,18 +216,13 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task) return &user_bfin_native_view; } -void ptrace_enable(struct task_struct *child) +void user_enable_single_step(struct task_struct *child) { struct pt_regs *regs = task_pt_regs(child); regs->syscfg |= SYSCFG_SSSTEP; } -/* - * Called by kernel/ptrace.c when detaching.. - * - * Make sure the single step bit is not set. - */ -void ptrace_disable(struct task_struct *child) +void user_disable_single_step(struct task_struct *child) { struct pt_regs *regs = task_pt_regs(child); regs->syscfg &= ~SYSCFG_SSSTEP; } @@ -412,27 +389,18 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) return ret; } -asmlinkage void syscall_trace(void) +asmlinkage int syscall_trace_enter(struct pt_regs *regs) { - if (!test_thread_flag(TIF_SYSCALL_TRACE)) - return; - - if (!(current->ptrace & PT_PTRACED)) - return; - - /* the 0x80 provides a way for the tracing parent to distinguish - * between a syscall stop and SIGTRAP delivery - */ - ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) - ? 0x80 : 0)); - - /* - * this isn't the same as continuing with a signal, but it will do - * for normal use. strace only continues with a signal if the - * stopping signal is not SIGTRAP.
-brl - */ - if (current->exit_code) { - send_sig(current->exit_code, current, 1); - current->exit_code = 0; - } + int ret = 0; + + if (test_thread_flag(TIF_SYSCALL_TRACE)) + ret = tracehook_report_syscall_entry(regs); + + return ret; +} + +asmlinkage void syscall_trace_leave(struct pt_regs *regs) +{ + if (test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall_exit(regs, 0); } diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c index e0fd63e9e38a..e60990c0a1f0 100644 --- a/arch/blackfin/kernel/signal.c +++ b/arch/blackfin/kernel/signal.c @@ -1,5 +1,5 @@ /* - * Copyright 2004-2009 Analog Devices Inc. + * Copyright 2004-2010 Analog Devices Inc. * * Licensed under the GPL-2 or later */ @@ -206,16 +206,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info, regs->r1 = (unsigned long)(&frame->info); regs->r2 = (unsigned long)(&frame->uc); - /* - * Clear the trace flag when entering the signal handler, but - * notify any tracer that was single-stepping it. The tracer - * may want to single-step inside the handler too. - */ - if (regs->syscfg & TRACE_BITS) { - regs->syscfg &= ~TRACE_BITS; - ptrace_notify(SIGTRAP); - } - return 0; give_sigsegv: @@ -315,6 +305,8 @@ asmlinkage void do_signal(struct pt_regs *regs) * clear the TIF_RESTORE_SIGMASK flag */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) clear_thread_flag(TIF_RESTORE_SIGMASK); + + tracehook_signal_handler(signr, &info, &ka, regs, 1); } return; diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index 1fa414f7852f..0df5b834d34e 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S @@ -736,7 +736,8 @@ ENDPROC(_system_call) * this symbol need not be global anyways, so ... */ _sys_trace: - pseudo_long_call _syscall_trace, p5; + r0 = sp; + pseudo_long_call _syscall_trace_enter, p5; /* Execute the appropriate system call */ @@ -760,7 +761,8 @@ _sys_trace: SP += 24; [sp + PT_R0] = r0; - pseudo_long_call _syscall_trace, p5; + r0 = sp; + pseudo_long_call _syscall_trace_leave, p5; jump .Lresume_userspace; ENDPROC(_sys_trace) -- cgit v1.2.3 From 600482c13d3d3612d71f39d8aaec47f63aafa801 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Wed, 17 Feb 2010 10:44:22 +0000 Subject: Blackfin: fix single stepping over system calls On Blackfin systems, the hardware single step exception triggers before the system call exception, so we need to save this info to process it later on. Otherwise, single stepping in userspace misses a few insns right after the system call. This is based a bit on the SuperH code added in commit 4b505db9c4c72dbd. 
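
As a rough way to exercise this path from userspace, the following hedged sketch (not part of the patch, using only the standard ptrace(2) interface) single-steps a traced child across a system call and counts the stops; on a kernel with the problem described above, the instructions right after the system call do not produce single-step stops:

/*
 * Hedged test sketch (not from the patch): single-step a child across a
 * system call with PTRACE_SINGLESTEP and count how many times it stops.
 */
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);		/* hand control to the tracer */
		getppid();		/* the system call we step across */
		_exit(0);
	}

	int status, steps = 0;
	waitpid(child, &status, 0);	/* initial SIGSTOP */

	while (WIFSTOPPED(status)) {
		if (ptrace(PTRACE_SINGLESTEP, child, NULL, NULL) < 0)
			break;
		waitpid(child, &status, 0);
		steps++;
	}

	printf("traced child stopped %d times\n", steps);
	return 0;
}

The absolute count is noisy (the libc wrappers execute many instructions); the point is only that every user-mode instruction after the syscall should also produce a stop.
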
Reported-by: Roland McGrath Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/thread_info.h | 2 ++ arch/blackfin/kernel/ptrace.c | 11 +++++++++-- arch/blackfin/kernel/signal.c | 3 ++- arch/blackfin/mach-common/entry.S | 2 ++ 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h index 0149c92ceeb0..e9a5614cdbb1 100644 --- a/arch/blackfin/include/asm/thread_info.h +++ b/arch/blackfin/include/asm/thread_info.h @@ -103,6 +103,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_FREEZE 6 /* is freezing for suspend */ #define TIF_IRQ_SYNC 7 /* sync pipeline stage */ #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ +#define TIF_SINGLESTEP 9 /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1<syscfg |= SYSCFG_SSSTEP; + + set_tsk_thread_flag(child, TIF_SINGLESTEP); } void user_disable_single_step(struct task_struct *child) { struct pt_regs *regs = task_pt_regs(child); regs->syscfg &= ~SYSCFG_SSSTEP; + + clear_tsk_thread_flag(child, TIF_SINGLESTEP); } long arch_ptrace(struct task_struct *child, long request, long addr, long data) @@ -401,6 +405,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) asmlinkage void syscall_trace_leave(struct pt_regs *regs) { - if (test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, 0); + int step; + + step = test_thread_flag(TIF_SINGLESTEP); + if (step || test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall_exit(regs, step); } diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c index e60990c0a1f0..28d6f28c058c 100644 --- a/arch/blackfin/kernel/signal.c +++ b/arch/blackfin/kernel/signal.c @@ -306,7 +306,8 @@ asmlinkage void do_signal(struct pt_regs *regs) if (test_thread_flag(TIF_RESTORE_SIGMASK)) clear_thread_flag(TIF_RESTORE_SIGMASK); - tracehook_signal_handler(signr, &info, &ka, regs, 1); + tracehook_signal_handler(signr, &info, &ka, regs, + test_thread_flag(TIF_SINGLESTEP)); } return; diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index 0df5b834d34e..0a9e458d0f7e 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S @@ -642,6 +642,8 @@ ENTRY(_system_call) r7 = [p2+TI_FLAGS]; CC = BITTST(r7,TIF_SYSCALL_TRACE); if CC JUMP _sys_trace; + CC = BITTST(r7,TIF_SINGLESTEP); + if CC JUMP _sys_trace; /* Execute the appropriate system call */ -- cgit v1.2.3 From 9e228ee9eae97b533d3b3133f76478c70dbd4294 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Fri, 12 Feb 2010 07:24:34 +0000 Subject: Blackfin: check for bad syscalls after tracing it We want to report all system calls (even invalid ones) to the tracing layers, so check the NR only after we've notified. Signed-off-by: Mike Frysinger --- arch/blackfin/mach-common/entry.S | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index 0a9e458d0f7e..6c20044c7f4c 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S @@ -626,13 +626,6 @@ ENTRY(_system_call) p0 = [sp + PT_ORIG_P0]; #endif /* CONFIG_IPIPE */ - /* Check the System Call */ - r7 = __NR_syscall; - /* System call number is passed in P0 */ - r6 = p0; - cc = r6 < r7; - if ! 
cc jump .Lbadsys; - /* are we tracing syscalls?*/ r7 = sp; r6.l = lo(ALIGN_PAGE_MASK); @@ -645,6 +638,12 @@ ENTRY(_system_call) CC = BITTST(r7,TIF_SINGLESTEP); if CC JUMP _sys_trace; + /* Make sure the system call # is valid */ + p4 = __NR_syscall; + /* System call number is passed in P0 */ + cc = p4 <= p0; + if cc jump .Lbadsys; + /* Execute the appropriate system call */ p4 = p0; @@ -741,9 +740,14 @@ _sys_trace: r0 = sp; pseudo_long_call _syscall_trace_enter, p5; - /* Execute the appropriate system call */ - + /* Make sure the system call # is valid */ p4 = [SP + PT_P0]; + p3 = __NR_syscall; + cc = p3 <= p4; + r0 = -ENOSYS; + if cc jump .Lsys_trace_badsys; + + /* Execute the appropriate system call */ p5.l = _sys_call_table; p5.h = _sys_call_table; p5 = p5 + (p4 << 2); @@ -761,6 +765,7 @@ _sys_trace: SP += -12; call (p5); SP += 24; +.Lsys_trace_badsys: [sp + PT_R0] = r0; r0 = sp; -- cgit v1.2.3 From ddaebcabbc18811ac4ac6e84fb9c327e8393a1dc Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Fri, 29 Jan 2010 01:33:54 +0000 Subject: Blackfin: add support for restart_syscall() Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/signal.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c index 28d6f28c058c..d536f35d1f43 100644 --- a/arch/blackfin/kernel/signal.c +++ b/arch/blackfin/kernel/signal.c @@ -17,6 +17,7 @@ #include #include #include +#include #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) @@ -50,6 +51,9 @@ rt_restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *p unsigned long usp = 0; int err = 0; + /* Always make any pending restarted system calls return -EINTR */ + current_thread_info()->restart_block.fn = do_no_restart_syscall; + #define RESTORE(x) err |= __get_user(regs->x, &sc->sc_##x) /* restore passed registers */ @@ -237,6 +241,11 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) regs->r0 = regs->orig_r0; regs->pc -= 2; break; + + case -ERESTART_RESTARTBLOCK: + regs->p0 = __NR_restart_syscall; + regs->pc -= 2; + break; } } -- cgit v1.2.3 From fe5b25c09873faee44077ee6ff8f23eee61b0fa0 Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Thu, 4 Feb 2010 14:41:39 +0000 Subject: Blackfin: bf537-stamp: add example ADS7846 touchscreen resources Signed-off-by: Michael Hennerich Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 36 ++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 9bc7bd3f35f8..7a1d645bbfbb 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -825,6 +825,29 @@ static inline void adf702x_mac_init(void) static inline void adf702x_mac_init(void) {} #endif +#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) +#include +static struct bfin5xx_spi_chip ad7873_spi_chip_info = { + .bits_per_word = 8, +}; + +static int ads7873_get_pendown_state(void) +{ + return gpio_get_value(GPIO_PF6); +} + +static struct ads7846_platform_data __initdata ad7873_pdata = { + .model = 7873, /* AD7873 */ + .x_max = 0xfff, + .y_max = 0xfff, + .x_plate_ohms = 620, + .debounce_max = 1, + .debounce_rep = 0, + .debounce_tol = (~0), + .get_pendown_state = ads7873_get_pendown_state, +}; +#endif + #if defined(CONFIG_MTD_DATAFLASH) \ || defined(CONFIG_MTD_DATAFLASH_MODULE) @@ -1026,7 +1049,18 @@ static struct spi_board_info 
bfin_spi_board_info[] __initdata = { .mode = SPI_MODE_0, }, #endif - +#if defined(CONFIG_TOUCHSCREEN_ADS7846) || defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) + { + .modalias = "ads7846", + .max_speed_hz = 2000000, /* max spi clock (SCK) speed in HZ */ + .bus_num = 0, + .irq = IRQ_PF6, + .chip_select = GPIO_PF10 + MAX_CTRL_CS, /* GPIO controlled SSEL */ + .controller_data = &ad7873_spi_chip_info, + .platform_data = &ad7873_pdata, + .mode = SPI_MODE_0, + }, +#endif }; #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) -- cgit v1.2.3 From 2bc4affe9c374983220c1a5d5566ce67c95384fc Mon Sep 17 00:00:00 2001 From: Frans Pop Date: Sat, 6 Feb 2010 18:47:18 +0100 Subject: Blackfin: remove trailing space in messages Signed-off-by: Frans Pop Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/bfin_dma_5xx.c | 10 +++++----- arch/blackfin/kernel/traps.c | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c index 924c00286bab..26403d1c9e65 100644 --- a/arch/blackfin/kernel/bfin_dma_5xx.c +++ b/arch/blackfin/kernel/bfin_dma_5xx.c @@ -91,7 +91,7 @@ late_initcall(proc_dma_init); */ int request_dma(unsigned int channel, const char *device_id) { - pr_debug("request_dma() : BEGIN \n"); + pr_debug("request_dma() : BEGIN\n"); if (device_id == NULL) printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel); @@ -107,7 +107,7 @@ int request_dma(unsigned int channel, const char *device_id) #endif if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) { - pr_debug("DMA CHANNEL IN USE \n"); + pr_debug("DMA CHANNEL IN USE\n"); return -EBUSY; } @@ -131,7 +131,7 @@ int request_dma(unsigned int channel, const char *device_id) * you have to request DMA, before doing any operations on * descriptor/channel */ - pr_debug("request_dma() : END \n"); + pr_debug("request_dma() : END\n"); return 0; } EXPORT_SYMBOL(request_dma); @@ -171,7 +171,7 @@ static void clear_dma_buffer(unsigned int channel) void free_dma(unsigned int channel) { - pr_debug("freedma() : BEGIN \n"); + pr_debug("freedma() : BEGIN\n"); BUG_ON(channel >= MAX_DMA_CHANNELS || !atomic_read(&dma_ch[channel].chan_status)); @@ -185,7 +185,7 @@ void free_dma(unsigned int channel) /* Clear the DMA Variable in the Channel */ atomic_set(&dma_ch[channel].chan_status, 0); - pr_debug("freedma() : END \n"); + pr_debug("freedma() : END\n"); } EXPORT_SYMBOL(free_dma); diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index 4eaca2d1deed..ba70c4bc2699 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c @@ -1116,7 +1116,7 @@ void dump_bfin_mem(struct pt_regs *fp) /* And the last RETI points to the current userspace context */ if ((fp + 1)->pc >= current->mm->start_code && (fp + 1)->pc <= current->mm->end_code) { - verbose_printk(KERN_NOTICE "It might be better to look around here : \n"); + verbose_printk(KERN_NOTICE "It might be better to look around here :\n"); verbose_printk(KERN_NOTICE "-------------------------------------------\n"); show_regs(fp + 1); verbose_printk(KERN_NOTICE "-------------------------------------------\n"); -- cgit v1.2.3 From f32792d045e1bbd86c0af0a28a46ae87af1ae100 Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Tue, 9 Feb 2010 02:47:09 +0000 Subject: Blackfin: bf537-stamp: add example AD5398 power regulator resources Signed-off-by: Sonic Zhang Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 74 +++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff 
--git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 7a1d645bbfbb..845629260290 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -35,6 +35,11 @@ #include #include #include +#ifdef CONFIG_REGULATOR_AD5398 +#include +#endif +#include +#include /* * Name the Board for the /proc/cpuinfo @@ -1634,6 +1639,59 @@ static struct adp8870_backlight_platform_data adp8870_pdata = { }; #endif +#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE) +static struct regulator_consumer_supply ad5398_consumer = { + .supply = "current", +}; + +static struct regulator_init_data ad5398_regulator_data = { + .constraints = { + .name = "current range", + .max_uA = 120000, + .valid_ops_mask = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = 1, + .consumer_supplies = &ad5398_consumer, +}; + +static struct ad5398_platform_data ad5398_i2c_platform_data = { + .current_bits = 10, + .current_offset = 4, + .regulator_data = &ad5398_regulator_data, +}; + +#if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \ + defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE) +static struct platform_device ad5398_virt_consumer_device = { + .name = "reg-virt-consumer", + .id = 0, + .dev = { + .platform_data = "current", /* Passed to driver */ + }, +}; +#endif +#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \ + defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE) +static struct regulator_bulk_data ad5398_bulk_data = { + .supply = "current", +}; + +static struct regulator_userspace_consumer_data ad5398_userspace_comsumer_data = { + .name = "ad5398", + .num_supplies = 1, + .supplies = &ad5398_bulk_data, +}; + +static struct platform_device ad5398_userspace_consumer_device = { + .name = "reg-userspace-consumer", + .id = 0, + .dev = { + .platform_data = &ad5398_userspace_comsumer_data, + }, +}; +#endif +#endif + static struct i2c_board_info __initdata bfin_i2c_board_info[] = { #if defined(CONFIG_INPUT_AD714X_I2C) || defined(CONFIG_INPUT_AD714X_I2C_MODULE) { @@ -1743,6 +1801,12 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { I2C_BOARD_INFO("ssm2602", 0x1b), }, #endif +#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE) + { + I2C_BOARD_INFO("ad5398", 0xC), + .platform_data = (void *)&ad5398_i2c_platform_data, + }, +#endif }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) @@ -2047,6 +2111,16 @@ static struct platform_device *stamp_devices[] __initdata = { #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) &bfin_ac97, #endif +#if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE) +#if defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER) || \ + defined(CONFIG_REGULATOR_VIRTUAL_CONSUMER_MODULE) + &ad5398_virt_consumer_device, +#endif +#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \ + defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE) + &ad5398_userspace_consumer_device, +#endif +#endif }; static int __init stamp_init(void) -- cgit v1.2.3 From f5f9531c7e588ee62e3aeddb14613ea80e7c2ca2 Mon Sep 17 00:00:00 2001 From: Graf Yang Date: Wed, 10 Feb 2010 07:15:59 +0000 Subject: Blackfin: bf537-stamp: add example AD2S90 resources Signed-off-by: Graf Yang Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c 
index 845629260290..4a09ce90ca4b 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -653,6 +653,13 @@ static struct ad714x_platform_data ad7142_i2c_platform_data = { }; #endif +#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE) +static struct bfin5xx_spi_chip ad2s90_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) #define MMC_SPI_CARD_DETECT_INT IRQ_PF5 @@ -967,6 +974,16 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif +#if defined(CONFIG_AD2S90) || defined(CONFIG_AD2S90_MODULE) + { + .modalias = "ad2s90", + .bus_num = 0, + .chip_select = 3, /* change it for your board */ + .platform_data = NULL, + .controller_data = &ad2s90_spi_chip_info, + }, +#endif + #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) { .modalias = "mmc_spi", -- cgit v1.2.3 From f8e6dbffa7a6cb3da3bcaf1fde3039896e1ac764 Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Wed, 10 Feb 2010 09:09:05 +0000 Subject: Blackfin: bf537-stamp: add example ADP122/ADP150 power regulator resources Signed-off-by: Sonic Zhang Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 99 +++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 4a09ce90ca4b..5f189518697a 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -35,6 +35,9 @@ #include #include #include +#ifdef CONFIG_REGULATOR_ADP_SWITCH +#include +#endif #ifdef CONFIG_REGULATOR_AD5398 #include #endif @@ -2008,6 +2011,93 @@ static struct platform_device bfin_ac97 = { }; #endif +#if defined(CONFIG_REGULATOR_ADP_SWITCH) || defined(CONFIG_REGULATOR_ADP_SWITCH_MODULE) +#define REGULATOR_ADP122 "adp122" +#define REGULATOR_ADP150 "adp150" + +static struct regulator_consumer_supply adp122_consumers = { + .supply = REGULATOR_ADP122, +}; + +static struct regulator_consumer_supply adp150_consumers = { + .supply = REGULATOR_ADP150, +}; + +static struct regulator_init_data adp_switch_regulator_data[] = { + { + .constraints = { + .name = REGULATOR_ADP122, + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = 1, /* only 1 */ + .consumer_supplies = &adp122_consumers, + .driver_data = (void *)GPIO_PF2, /* gpio port only */ + }, + { + .constraints = { + .name = REGULATOR_ADP150, + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = 1, /* only 1 */ + .consumer_supplies = &adp150_consumers, + .driver_data = (void *)GPIO_PF3, /* gpio port only */ + }, +}; + +static struct adp_switch_platform_data adp_switch_pdata = { + .regulator_num = ARRAY_SIZE(adp_switch_regulator_data), + .regulator_data = adp_switch_regulator_data, +}; + +static struct platform_device adp_switch_device = { + .name = "adp_switch", + .id = 0, + .dev = { + .platform_data = &adp_switch_pdata, + }, +}; + +#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \ + defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE) +static struct regulator_bulk_data adp122_bulk_data = { + .supply = REGULATOR_ADP122, +}; + +static struct regulator_userspace_consumer_data adp122_userspace_comsumer_data = { + .name = REGULATOR_ADP122, + .num_supplies = 1, + .supplies = &adp122_bulk_data, +}; + +static struct platform_device adp122_userspace_consumer_device = { + .name = "reg-userspace-consumer", + .id = 0, + .dev = { + .platform_data = 
&adp122_userspace_comsumer_data, + }, +}; + +static struct regulator_bulk_data adp150_bulk_data = { + .supply = REGULATOR_ADP150, +}; + +static struct regulator_userspace_consumer_data adp150_userspace_comsumer_data = { + .name = REGULATOR_ADP150, + .num_supplies = 1, + .supplies = &adp150_bulk_data, +}; + +static struct platform_device adp150_userspace_consumer_device = { + .name = "reg-userspace-consumer", + .id = 1, + .dev = { + .platform_data = &adp150_userspace_comsumer_data, + }, +}; +#endif +#endif + + static struct platform_device *stamp_devices[] __initdata = { &bfin_dpmc, @@ -2138,6 +2228,15 @@ static struct platform_device *stamp_devices[] __initdata = { &ad5398_userspace_consumer_device, #endif #endif + +#if defined(CONFIG_REGULATOR_ADP_SWITCH) || defined(CONFIG_REGULATOR_ADP_SWITCH_MODULE) + &adp_switch_device, +#if defined(CONFIG_REGULATOR_USERSPACE_CONSUMER) || \ + defined(CONFIG_REGULATOR_USERSPACE_CONSUMER_MODULE) + &adp122_userspace_consumer_device, + &adp150_userspace_consumer_device, +#endif +#endif }; static int __init stamp_init(void) -- cgit v1.2.3 From 7f4f69f991146fa976cbc914a50285b2afc0ad93 Mon Sep 17 00:00:00 2001 From: Joachim Eastwood Date: Wed, 10 Feb 2010 12:31:41 +0100 Subject: Blackfin: GPIO: implement to_irq handler This makes it possible to support IRQs coming from off-chip GPIO controllers. Signed-off-by: Joachim Eastwood Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/gpio.h | 17 +++++++++++++---- arch/blackfin/kernel/bfin_gpio.c | 6 ++++++ 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h index 539468a05057..91bd2d7b9d55 100644 --- a/arch/blackfin/include/asm/gpio.h +++ b/arch/blackfin/include/asm/gpio.h @@ -70,6 +70,8 @@ #ifndef __ASSEMBLY__ +#include + /*********************************************************** * * FUNCTIONS: Blackfin General Purpose Ports Access Functions @@ -223,6 +225,9 @@ int bfin_gpio_direction_output(unsigned gpio, int value); int bfin_gpio_get_value(unsigned gpio); void bfin_gpio_set_value(unsigned gpio, int value); +#include +#include + #ifdef CONFIG_GPIOLIB #include /* cansleep wrappers */ @@ -247,6 +252,11 @@ static inline int gpio_cansleep(unsigned int gpio) return __gpio_cansleep(gpio); } +static inline int gpio_to_irq(unsigned gpio) +{ + return __gpio_to_irq(gpio); +} + #else /* !CONFIG_GPIOLIB */ static inline int gpio_request(unsigned gpio, const char *label) @@ -279,10 +289,6 @@ static inline void gpio_set_value(unsigned gpio, int value) return bfin_gpio_set_value(gpio, value); } -#include /* cansleep wrappers */ -#endif /* !CONFIG_GPIOLIB */ -#include - static inline int gpio_to_irq(unsigned gpio) { if (likely(gpio < MAX_BLACKFIN_GPIOS)) @@ -291,6 +297,9 @@ static inline int gpio_to_irq(unsigned gpio) return -EINVAL; } +#include /* cansleep wrappers */ +#endif /* !CONFIG_GPIOLIB */ + static inline int irq_to_gpio(unsigned irq) { return (irq - GPIO_IRQ_BASE); diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c index 0dd9cf913503..6dcb344001f0 100644 --- a/arch/blackfin/kernel/bfin_gpio.c +++ b/arch/blackfin/kernel/bfin_gpio.c @@ -1319,6 +1319,11 @@ void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio) return bfin_gpio_free(gpio); } +int bfin_gpiolib_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) +{ + return gpio + GPIO_IRQ_BASE; +} + static struct gpio_chip bfin_chip = { .label = "BFIN-GPIO", .direction_input = bfin_gpiolib_direction_input, @@ -1327,6 +1332,7 @@ static struct 
gpio_chip bfin_chip = { .set = bfin_gpiolib_set_value, .request = bfin_gpiolib_gpio_request, .free = bfin_gpiolib_gpio_free, + .to_irq = bfin_gpiolib_gpio_to_irq, .base = 0, .ngpio = MAX_BLACKFIN_GPIOS, }; -- cgit v1.2.3 From c48d767569ec6449277bf4248295b4c165b57159 Mon Sep 17 00:00:00 2001 From: Cliff Cai Date: Thu, 11 Feb 2010 09:27:18 +0000 Subject: Blackfin: bf537-stamp: add example ADAU1361 resources Signed-off-by: Cliff Cai Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 5f189518697a..8a138f0f0302 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -1811,6 +1811,11 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { I2C_BOARD_INFO("adau1761", 0x38), }, #endif +#if defined(CONFIG_SND_SOC_ADAU1361) || defined(CONFIG_SND_SOC_ADAU1361_MODULE) + { + I2C_BOARD_INFO("adau1361", 0x38), + }, +#endif #if defined(CONFIG_AD525X_DPOT) || defined(CONFIG_AD525X_DPOT_MODULE) { I2C_BOARD_INFO("ad5258", 0x18), -- cgit v1.2.3 From f9c29e872b1c468d09c04ed452dc58961914c9d7 Mon Sep 17 00:00:00 2001 From: Joachim Eastwood Date: Thu, 11 Feb 2010 12:41:11 +0100 Subject: Blackfin: mark gpio lib functions static Signed-off-by: Joachim Eastwood Signed-off-by: Mike Frysinger --- arch/blackfin/kernel/bfin_gpio.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c index 6dcb344001f0..e35e20f00d9b 100644 --- a/arch/blackfin/kernel/bfin_gpio.c +++ b/arch/blackfin/kernel/bfin_gpio.c @@ -1289,37 +1289,37 @@ __initcall(gpio_register_proc); #endif #ifdef CONFIG_GPIOLIB -int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio) +static int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio) { return bfin_gpio_direction_input(gpio); } -int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level) +static int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level) { return bfin_gpio_direction_output(gpio, level); } -int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio) +static int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio) { return bfin_gpio_get_value(gpio); } -void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value) +static void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value) { return bfin_gpio_set_value(gpio, value); } -int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio) +static int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio) { return bfin_gpio_request(gpio, chip->label); } -void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio) +static void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio) { return bfin_gpio_free(gpio); } -int bfin_gpiolib_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) +static int bfin_gpiolib_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) { return gpio + GPIO_IRQ_BASE; } -- cgit v1.2.3 From b2740801457b2fbbe14812c0fda24bd689025886 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Tue, 16 Feb 2010 04:03:10 -0500 Subject: Blackfin: asm/page.h: pull in asm-generic headers Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/page.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h 
index 1d04e4078340..d0ce975bcd48 100644 --- a/arch/blackfin/include/asm/page.h +++ b/arch/blackfin/include/asm/page.h @@ -15,4 +15,7 @@ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#include +#include + #endif -- cgit v1.2.3 From aec59c911307639c77076bdc9d9b546a4a767a73 Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Fri, 19 Feb 2010 15:09:10 +0000 Subject: Blackfin: add support for the on-chip MAC status interrupts This patch provides infrastructure for MAC Wake-On-Lan and PHYINT use in phylib. New Interrupts added: IRQ_MAC_PHYINT /* PHY_INT Interrupt */ IRQ_MAC_MMCINT /* MMC Counter Interrupt */ IRQ_MAC_RXFSINT /* RX Frame-Status Interrupt */ IRQ_MAC_TXFSINT /* TX Frame-Status Interrupt */ IRQ_MAC_WAKEDET /* Wake-Up Interrupt */ IRQ_MAC_RXDMAERR /* RX DMA Direction Error Interrupt */ IRQ_MAC_TXDMAERR /* TX DMA Direction Error Interrupt */ IRQ_MAC_STMDONE /* Station Mgt. Transfer Done Interrupt */ On BF537/6 the implementation is not straight forward since there are now two chained chained_handlers. A cleaner approach would have been to add latter IRQs to the demux of IRQ_GENERIC_ERROR. Signed-off-by: Michael Hennerich Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf518/include/mach/irq.h | 11 ++- arch/blackfin/mach-bf527/include/mach/irq.h | 11 ++- arch/blackfin/mach-bf537/include/mach/irq.h | 11 ++- arch/blackfin/mach-common/ints-priority.c | 140 +++++++++++++++++++++++++++- 4 files changed, 166 insertions(+), 7 deletions(-) diff --git a/arch/blackfin/mach-bf518/include/mach/irq.h b/arch/blackfin/mach-bf518/include/mach/irq.h index 52edc8483911..435e76e31aaa 100644 --- a/arch/blackfin/mach-bf518/include/mach/irq.h +++ b/arch/blackfin/mach-bf518/include/mach/irq.h @@ -151,7 +151,16 @@ #define GPIO_IRQ_BASE IRQ_PF0 -#define NR_MACH_IRQS (IRQ_PH15 + 1) +#define IRQ_MAC_PHYINT 119 /* PHY_INT Interrupt */ +#define IRQ_MAC_MMCINT 120 /* MMC Counter Interrupt */ +#define IRQ_MAC_RXFSINT 121 /* RX Frame-Status Interrupt */ +#define IRQ_MAC_TXFSINT 122 /* TX Frame-Status Interrupt */ +#define IRQ_MAC_WAKEDET 123 /* Wake-Up Interrupt */ +#define IRQ_MAC_RXDMAERR 124 /* RX DMA Direction Error Interrupt */ +#define IRQ_MAC_TXDMAERR 125 /* TX DMA Direction Error Interrupt */ +#define IRQ_MAC_STMDONE 126 /* Station Mgt. Transfer Done Interrupt */ + +#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1) #define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS) #define IVG7 7 diff --git a/arch/blackfin/mach-bf527/include/mach/irq.h b/arch/blackfin/mach-bf527/include/mach/irq.h index 17604b4a81cc..704d9253e41d 100644 --- a/arch/blackfin/mach-bf527/include/mach/irq.h +++ b/arch/blackfin/mach-bf527/include/mach/irq.h @@ -151,7 +151,16 @@ #define GPIO_IRQ_BASE IRQ_PF0 -#define NR_MACH_IRQS (IRQ_PH15 + 1) +#define IRQ_MAC_PHYINT 119 /* PHY_INT Interrupt */ +#define IRQ_MAC_MMCINT 120 /* MMC Counter Interrupt */ +#define IRQ_MAC_RXFSINT 121 /* RX Frame-Status Interrupt */ +#define IRQ_MAC_TXFSINT 122 /* TX Frame-Status Interrupt */ +#define IRQ_MAC_WAKEDET 123 /* Wake-Up Interrupt */ +#define IRQ_MAC_RXDMAERR 124 /* RX DMA Direction Error Interrupt */ +#define IRQ_MAC_TXDMAERR 125 /* TX DMA Direction Error Interrupt */ +#define IRQ_MAC_STMDONE 126 /* Station Mgt. 
Transfer Done Interrupt */ + +#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1) #define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS) #define IVG7 7 diff --git a/arch/blackfin/mach-bf537/include/mach/irq.h b/arch/blackfin/mach-bf537/include/mach/irq.h index 9b2cbcac98e0..789a4f226f7b 100644 --- a/arch/blackfin/mach-bf537/include/mach/irq.h +++ b/arch/blackfin/mach-bf537/include/mach/irq.h @@ -134,7 +134,16 @@ #define GPIO_IRQ_BASE IRQ_PF0 -#define NR_MACH_IRQS (IRQ_PH15 + 1) +#define IRQ_MAC_PHYINT 98 /* PHY_INT Interrupt */ +#define IRQ_MAC_MMCINT 99 /* MMC Counter Interrupt */ +#define IRQ_MAC_RXFSINT 100 /* RX Frame-Status Interrupt */ +#define IRQ_MAC_TXFSINT 101 /* TX Frame-Status Interrupt */ +#define IRQ_MAC_WAKEDET 102 /* Wake-Up Interrupt */ +#define IRQ_MAC_RXDMAERR 103 /* RX DMA Direction Error Interrupt */ +#define IRQ_MAC_TXDMAERR 104 /* TX DMA Direction Error Interrupt */ +#define IRQ_MAC_STMDONE 105 /* Station Mgt. Transfer Done Interrupt */ + +#define NR_MACH_IRQS (IRQ_MAC_STMDONE + 1) #define NR_IRQS (NR_MACH_IRQS + NR_SPARE_IRQS) #define IVG7 7 diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index ebf886091187..11c05f3e178b 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c @@ -325,7 +325,6 @@ static int error_int_mask; static void bfin_generic_error_mask_irq(unsigned int irq) { error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR)); - if (!error_int_mask) bfin_internal_mask_irq(IRQ_GENERIC_ERROR); } @@ -416,6 +415,127 @@ static void bfin_demux_error_irq(unsigned int int_err_irq, } #endif /* BF537_GENERIC_ERROR_INT_DEMUX */ +#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) +static int mac_stat_int_mask; + +static void bfin_mac_status_ack_irq(unsigned int irq) +{ + switch (irq) { + case IRQ_MAC_MMCINT: + bfin_write_EMAC_MMC_TIRQS( + bfin_read_EMAC_MMC_TIRQE() & + bfin_read_EMAC_MMC_TIRQS()); + bfin_write_EMAC_MMC_RIRQS( + bfin_read_EMAC_MMC_RIRQE() & + bfin_read_EMAC_MMC_RIRQS()); + break; + case IRQ_MAC_RXFSINT: + bfin_write_EMAC_RX_STKY( + bfin_read_EMAC_RX_IRQE() & + bfin_read_EMAC_RX_STKY()); + break; + case IRQ_MAC_TXFSINT: + bfin_write_EMAC_TX_STKY( + bfin_read_EMAC_TX_IRQE() & + bfin_read_EMAC_TX_STKY()); + break; + case IRQ_MAC_WAKEDET: + bfin_write_EMAC_WKUP_CTL( + bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS); + break; + default: + /* These bits are W1C */ + bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT)); + break; + } +} + +static void bfin_mac_status_mask_irq(unsigned int irq) +{ + mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT)); +#ifdef BF537_GENERIC_ERROR_INT_DEMUX + switch (irq) { + case IRQ_MAC_PHYINT: + bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE); + break; + default: + break; + } +#else + if (!mac_stat_int_mask) + bfin_internal_mask_irq(IRQ_MAC_ERROR); +#endif + bfin_mac_status_ack_irq(irq); +} + +static void bfin_mac_status_unmask_irq(unsigned int irq) +{ +#ifdef BF537_GENERIC_ERROR_INT_DEMUX + switch (irq) { + case IRQ_MAC_PHYINT: + bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE); + break; + default: + break; + } +#else + if (!mac_stat_int_mask) + bfin_internal_unmask_irq(IRQ_MAC_ERROR); +#endif + mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT); +} + +#ifdef CONFIG_PM +int bfin_mac_status_set_wake(unsigned int irq, unsigned int state) +{ +#ifdef BF537_GENERIC_ERROR_INT_DEMUX + return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state); +#else + return bfin_internal_set_wake(IRQ_MAC_ERROR, state); +#endif +} +#endif + +static struct 
irq_chip bfin_mac_status_irqchip = { + .name = "MACST", + .ack = bfin_ack_noop, + .mask_ack = bfin_mac_status_mask_irq, + .mask = bfin_mac_status_mask_irq, + .unmask = bfin_mac_status_unmask_irq, +#ifdef CONFIG_PM + .set_wake = bfin_mac_status_set_wake, +#endif +}; + +static void bfin_demux_mac_status_irq(unsigned int int_err_irq, + struct irq_desc *inta_desc) +{ + int i, irq = 0; + u32 status = bfin_read_EMAC_SYSTAT(); + + for (i = 0; i < (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++) + if (status & (1L << i)) { + irq = IRQ_MAC_PHYINT + i; + break; + } + + if (irq) { + if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) { + bfin_handle_irq(irq); + } else { + bfin_mac_status_ack_irq(irq); + pr_debug("IRQ %d:" + " MASKED MAC ERROR INTERRUPT ASSERTED\n", + irq); + } + } else + printk(KERN_ERR + "%s : %s : LINE %d :\nIRQ ?: MAC ERROR" + " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n", + __func__, __FILE__, __LINE__); +} +#endif + static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle) { #ifdef CONFIG_IPIPE @@ -1070,7 +1190,11 @@ int __init init_arch_irq(void) set_irq_chained_handler(irq, bfin_demux_error_irq); break; #endif - +#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) + case IRQ_MAC_ERROR: + set_irq_chained_handler(irq, bfin_demux_mac_status_irq); + break; +#endif #ifdef CONFIG_SMP case IRQ_SUPPLE_0: case IRQ_SUPPLE_1: @@ -1111,14 +1235,22 @@ int __init init_arch_irq(void) for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++) set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip, handle_level_irq); +#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) + set_irq_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq); +#endif #endif +#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) + for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++) + set_irq_chip_and_handler(irq, &bfin_mac_status_irqchip, + handle_level_irq); +#endif /* if configured as edge, then will be changed to do_edge_IRQ */ - for (irq = GPIO_IRQ_BASE; irq < NR_MACH_IRQS; irq++) + for (irq = GPIO_IRQ_BASE; + irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++) set_irq_chip_and_handler(irq, &bfin_gpio_irqchip, handle_level_irq); - bfin_write_IMASK(0); CSYNC(); ilat = bfin_read_ILAT(); -- cgit v1.2.3 From df6a949b4666780969fd90a2f3ac3db3b62552d6 Mon Sep 17 00:00:00 2001 From: Graf Yang Date: Sun, 21 Feb 2010 10:23:07 +0000 Subject: Blackfin: bf537-stamp: add example AD2S120x resources Signed-off-by: Graf Yang Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 8a138f0f0302..283cc66f0220 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -663,6 +663,18 @@ static struct bfin5xx_spi_chip ad2s90_spi_chip_info = { }; #endif +#if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE) +unsigned short ad2s120x_platform_data[] = { + /* used as SAMPLE and RDVEL */ + GPIO_PF5, GPIO_PF6, 0 +}; + +static struct bfin5xx_spi_chip ad2s120x_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 16, +}; +#endif + #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) #define MMC_SPI_CARD_DETECT_INT IRQ_PF5 @@ -987,6 +999,16 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif +#if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE) + { + .modalias = "ad2s120x", + .bus_num = 0, + .chip_select = 4, /* 
CS, change it for your board */ + .platform_data = ad2s120x_platform_data, + .controller_data = &ad2s120x_spi_chip_info, + }, +#endif + #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) { .modalias = "mmc_spi", -- cgit v1.2.3 From d40bd71f88e7be193ce4feb4b92572c70024b9c2 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Mon, 22 Feb 2010 10:31:06 +0000 Subject: Blackfin: rename AD1836 to AD183X in board files The ASoC codec driver was generalized and renamed, so update the board resources accordingly. Signed-off-by: Barry Song Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf527/boards/cm_bf527.c | 8 ++++---- arch/blackfin/mach-bf527/boards/ezkit.c | 8 ++++---- arch/blackfin/mach-bf533/boards/H8606.c | 4 ++-- arch/blackfin/mach-bf533/boards/cm_bf533.c | 4 ++-- arch/blackfin/mach-bf533/boards/ezkit.c | 4 ++-- arch/blackfin/mach-bf533/boards/stamp.c | 4 ++-- arch/blackfin/mach-bf537/boards/cm_bf537e.c | 4 ++-- arch/blackfin/mach-bf537/boards/cm_bf537u.c | 4 ++-- arch/blackfin/mach-bf537/boards/pnav10.c | 8 ++++---- arch/blackfin/mach-bf537/boards/stamp.c | 11 ++++++----- arch/blackfin/mach-bf537/boards/tcm_bf537.c | 4 ++-- arch/blackfin/mach-bf548/boards/ezkit.c | 8 ++++---- arch/blackfin/mach-bf561/boards/cm_bf561.c | 4 ++-- arch/blackfin/mach-bf561/boards/ezkit.c | 8 ++++---- 14 files changed, 42 insertions(+), 41 deletions(-) diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c index d6efdfa7d4e0..ebe76d1e874a 100644 --- a/arch/blackfin/mach-bf527/boards/cm_bf527.c +++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c @@ -339,8 +339,8 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = { }; #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) \ - || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) \ + || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) static struct bfin5xx_spi_chip ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, @@ -417,8 +417,8 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) \ - || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) \ + || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) { .modalias = "ad1836", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index 3d32a35427a6..923383386aa1 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -428,8 +428,8 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = { }; #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) \ - || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) \ + || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) static struct bfin5xx_spi_chip ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, @@ -544,8 +544,8 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) \ - || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) \ + || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) { .modalias = "ad1836", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c index d10c8e8b69cf..175371af0692 100644 --- a/arch/blackfin/mach-bf533/boards/H8606.c +++ b/arch/blackfin/mach-bf533/boards/H8606.c @@ -171,7 +171,7 @@ static struct 
bfin5xx_spi_chip spi_adc_chip_info = { }; #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) static struct bfin5xx_spi_chip ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, @@ -206,7 +206,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) { .modalias = "ad1836", .max_speed_hz = 16, diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c index 0592fe0bb8af..fdcde61906dc 100644 --- a/arch/blackfin/mach-bf533/boards/cm_bf533.c +++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c @@ -71,7 +71,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = { }; #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) static struct bfin5xx_spi_chip ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, @@ -110,7 +110,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) { .modalias = "ad1836", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c index 84efda193847..739773cb7fc6 100644 --- a/arch/blackfin/mach-bf533/boards/ezkit.c +++ b/arch/blackfin/mach-bf533/boards/ezkit.c @@ -222,7 +222,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = { }; #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) static struct bfin5xx_spi_chip ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, @@ -261,7 +261,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) { .modalias = "ad1836", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c index e997d7f9a671..c457eaa60239 100644 --- a/arch/blackfin/mach-bf533/boards/stamp.c +++ b/arch/blackfin/mach-bf533/boards/stamp.c @@ -184,7 +184,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = { }; #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) static struct bfin5xx_spi_chip ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, @@ -251,7 +251,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) { .modalias = "ad1836", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c index 
c54331282baa..d35fc5fe4c2b 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c @@ -73,7 +73,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = { }; #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) static struct bfin5xx_spi_chip ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, @@ -112,7 +112,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) { .modalias = "ad1836", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c index 18daacb00b2c..d464ad5b72b2 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c @@ -74,7 +74,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = { }; #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) static struct bfin5xx_spi_chip ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, @@ -113,7 +113,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) { .modalias = "ad1836", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c index a0f64ff0fb55..812e8f991601 100644 --- a/arch/blackfin/mach-bf537/boards/pnav10.c +++ b/arch/blackfin/mach-bf537/boards/pnav10.c @@ -175,8 +175,8 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = { }; #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) \ - || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) \ + || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) static struct bfin5xx_spi_chip ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, @@ -238,8 +238,8 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) \ - || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) \ + || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) { .modalias = "ad1836", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 283cc66f0220..e0f5663a8acb 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -528,8 +528,8 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = { }; #endif -#if defined(CONFIG_SND_BF5XX_SOC_AD1836) \ - || defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE) +#if defined(CONFIG_SND_BF5XX_SOC_AD183X) \ + || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) static struct bfin5xx_spi_chip ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, @@ -953,13 +953,14 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif -#if defined(CONFIG_SND_BF5XX_SOC_AD1836) \ - || defined(CONFIG_SND_BF5XX_SOC_AD1836_MODULE) +#if 
defined(CONFIG_SND_BF5XX_SOC_AD183X) \ + || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) { - .modalias = "ad1836", + .modalias = "ad183x", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 4,/* CONFIG_SND_BLACKFIN_SPI_PFBIT */ + .platform_data = "ad1836", /* only includes chip name for the moment */ .controller_data = &ad1836_spi_chip_info, .mode = SPI_MODE_3, }, diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c index 59951cdef588..4f0a2e72ce4c 100644 --- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c +++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c @@ -74,7 +74,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = { }; #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) static struct bfin5xx_spi_chip ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, @@ -113,7 +113,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) { .modalias = "ad1836", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c index 5b3929957073..06919db00a74 100644 --- a/arch/blackfin/mach-bf548/boards/ezkit.c +++ b/arch/blackfin/mach-bf548/boards/ezkit.c @@ -963,8 +963,8 @@ static struct bfin5xx_spi_chip spi_flash_chip_info = { }; #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) \ - || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) \ + || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) static struct bfin5xx_spi_chip ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, @@ -1020,8 +1020,8 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { .mode = SPI_MODE_3, }, #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) \ - || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) \ + || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) { .modalias = "ad1836", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c index 37dd37523adf..e127aedc1d7f 100644 --- a/arch/blackfin/mach-bf561/boards/cm_bf561.c +++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c @@ -72,7 +72,7 @@ static struct bfin5xx_spi_chip spi_adc_chip_info = { }; #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) static struct bfin5xx_spi_chip ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, @@ -111,7 +111,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) { .modalias = "ad1836", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c index c7a5db79f838..9b93e2f95791 100644 --- a/arch/blackfin/mach-bf561/boards/ezkit.c +++ b/arch/blackfin/mach-bf561/boards/ezkit.c @@ -274,8 +274,8 @@ static struct platform_device 
ezkit_flash_device = { }; #endif -#if defined(CONFIG_SND_BLACKFIN_AD1836) \ - || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) \ + || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) static struct bfin5xx_spi_chip ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, @@ -328,8 +328,8 @@ static struct platform_device bfin_spi0_device = { #endif static struct spi_board_info bfin_spi_board_info[] __initdata = { -#if defined(CONFIG_SND_BLACKFIN_AD1836) \ - || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) +#if defined(CONFIG_SND_BLACKFIN_AD183X) \ + || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE) { .modalias = "ad1836", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ -- cgit v1.2.3 From 5f0225948ecc4bb21cc35839d3d447e22d40a71f Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Wed, 24 Feb 2010 07:32:50 +0000 Subject: Blackfin: bf537-stamp: add example AD7414 temp sensor resources Signed-off-by: Sonic Zhang Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index e0f5663a8acb..83489d899a14 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -1763,6 +1763,19 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { }, #endif +#if defined(CONFIG_AD7414) || defined(CONFIG_AD7414_MODULE) + { + I2C_BOARD_INFO("ad7414", 0x9), + .irq = IRQ_PG5, + /* + * platform_data pointer is borrwoed by the driver to + * store custimer defined IRQ ALART level mode. + * only IRQF_TRIGGER_HIGH and IRQF_TRIGGER_LOW are valid. + */ + .platform_data = (void *)IRQF_TRIGGER_LOW, + }, +#endif + #if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) { I2C_BOARD_INFO("pcf8574_lcd", 0x22), @@ -2056,6 +2069,8 @@ static struct regulator_init_data adp_switch_regulator_data[] = { .constraints = { .name = REGULATOR_ADP122, .valid_ops_mask = REGULATOR_CHANGE_STATUS, + .min_uA = 0, + .max_uA = 300000, }, .num_consumer_supplies = 1, /* only 1 */ .consumer_supplies = &adp122_consumers, @@ -2065,6 +2080,8 @@ static struct regulator_init_data adp_switch_regulator_data[] = { .constraints = { .name = REGULATOR_ADP150, .valid_ops_mask = REGULATOR_CHANGE_STATUS, + .min_uA = 0, + .max_uA = 150000, }, .num_consumer_supplies = 1, /* only 1 */ .consumer_supplies = &adp150_consumers, -- cgit v1.2.3 From 72fa2e9204d5efe4732346f99465a01c380f5cd3 Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Wed, 24 Feb 2010 21:05:35 +0000 Subject: Blackfin: bf537-stamp: add example ADP8860 backlight/led resources Signed-off-by: Michael Hennerich Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 48 +++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 83489d899a14..8f929539b812 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -1682,6 +1682,48 @@ static struct adp8870_backlight_platform_data adp8870_pdata = { }; #endif +#if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE) +#include +static struct led_info adp8860_leds[] = { + { + .name = "adp8860-led7", + .default_trigger = "none", + .flags = ADP8860_LED_D7 | ADP8860_LED_OFFT_600ms, + }, +}; + +static struct adp8860_backlight_platform_data adp8860_pdata = { + .bl_led_assign = ADP8860_BL_D1 | 
ADP8860_BL_D2 | ADP8860_BL_D3 | + ADP8860_BL_D4 | ADP8860_BL_D5 | ADP8860_BL_D6, /* 1 = Backlight 0 = Individual LED */ + + .bl_fade_in = ADP8860_FADE_T_1200ms, /* Backlight Fade-In Timer */ + .bl_fade_out = ADP8860_FADE_T_1200ms, /* Backlight Fade-Out Timer */ + .bl_fade_law = ADP8860_FADE_LAW_CUBIC1, /* fade-on/fade-off transfer characteristic */ + + .en_ambl_sens = 1, /* 1 = enable ambient light sensor */ + .abml_filt = ADP8860_BL_AMBL_FILT_320ms, /* Light sensor filter time */ + + .l1_daylight_max = ADP8860_BL_CUR_mA(20), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + .l1_daylight_dim = ADP8860_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + .l2_office_max = ADP8860_BL_CUR_mA(6), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + .l2_office_dim = ADP8860_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + .l3_dark_max = ADP8860_BL_CUR_mA(2), /* use BL_CUR_mA(I) 0 <= I <= 30 mA */ + .l3_dark_dim = ADP8860_BL_CUR_mA(0), /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */ + + .l2_trip = ADP8860_L2_COMP_CURR_uA(710), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + .l2_hyst = ADP8860_L2_COMP_CURR_uA(73), /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */ + .l3_trip = ADP8860_L3_COMP_CURR_uA(43), /* use L3_COMP_CURR_uA(I) 0 <= I <= 138 uA */ + .l3_hyst = ADP8860_L3_COMP_CURR_uA(11), /* use L3_COMP_CURR_uA(I) 0 <= I <= 138 uA */ + + .leds = adp8860_leds, + .num_leds = ARRAY_SIZE(adp8860_leds), + .led_fade_law = ADP8860_FADE_LAW_SQUARE, /* fade-on/fade-off transfer characteristic */ + .led_fade_in = ADP8860_FADE_T_600ms, + .led_fade_out = ADP8860_FADE_T_600ms, + .led_on_time = ADP8860_LED_ONT_200ms, +}; +#endif + #if defined(CONFIG_REGULATOR_AD5398) || defined(CONFIG_REGULATOR_AD5398_MODULE) static struct regulator_consumer_supply ad5398_consumer = { .supply = "current", @@ -1868,6 +1910,12 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { .platform_data = (void *)&ad5398_i2c_platform_data, }, #endif +#if defined(CONFIG_BACKLIGHT_ADP8860) || defined(CONFIG_BACKLIGHT_ADP8860_MODULE) + { + I2C_BOARD_INFO("adp8860", 0x2A), + .platform_data = (void *)&adp8860_pdata, + }, +#endif }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) -- cgit v1.2.3 From ef8873e06efdc023ee2e7f708787c79b78df3fcd Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Thu, 25 Feb 2010 10:27:48 +0000 Subject: Blackfin: bf537-stamp: add example AD7416 IIO resources Signed-off-by: Sonic Zhang Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 8f929539b812..4aec6c06f0fe 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -1818,6 +1818,19 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = { }, #endif +#if defined(CONFIG_AD7416) || defined(CONFIG_AD7416_MODULE) + { + I2C_BOARD_INFO("ad7417", 0xb), + .irq = IRQ_PG5, + /* + * platform_data pointer is borrwoed by the driver to + * store custimer defined IRQ ALART level mode. + * only IRQF_TRIGGER_HIGH and IRQF_TRIGGER_LOW are valid. 
+ */ + .platform_data = (void *)IRQF_TRIGGER_LOW, + }, +#endif + #if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) { I2C_BOARD_INFO("pcf8574_lcd", 0x22), -- cgit v1.2.3 From ae4a8c1903e5d0ec19dcb257ec922b888941abba Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Thu, 25 Feb 2010 13:32:24 +0000 Subject: Blackfin: don't support keypad wakeup from hibernate The on-chip keypad peripheral requires different registers to be setup depending on the standby type (standby vs hibernation). However, since the power management framework doesn't differentiate between these types, the driver doesn't know which registers to program and subsequently it avoids doing so. Always enabling the keyboard wakeup source causes misbehavior when the pins are not assigned to the keypad. If they happen to drive a certain level, they'll trigger a wake up event which is not wanted. So until the aforementioned issue can be sorted out, drop support for the wakeup source completely. Signed-off-by: Michael Hennerich Signed-off-by: Mike Frysinger --- arch/blackfin/mach-common/ints-priority.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index 11c05f3e178b..7ad8878bfa18 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c @@ -252,11 +252,6 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state) wakeup |= USBWE; break; #endif -#ifdef IRQ_KEY - case IRQ_KEY: - wakeup |= KPADWE; - break; -#endif #ifdef CONFIG_BF54x case IRQ_CNT: wakeup |= ROTWE; -- cgit v1.2.3 From 848c51ccee5c4d51b1dc1a029508cfbb73f8c260 Mon Sep 17 00:00:00 2001 From: Graf Yang Date: Fri, 26 Feb 2010 11:49:52 +0000 Subject: Blackfin: bf537-stamp: add example AD2S1210 IIO resources Signed-off-by: Graf Yang Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 4aec6c06f0fe..772b7bab0a9b 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -675,6 +675,23 @@ static struct bfin5xx_spi_chip ad2s120x_spi_chip_info = { }; #endif +#if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE) +unsigned short ad2s1210_platform_data[] = { + /* use as SAMPLE, A0, A1 */ + GPIO_PF7, GPIO_PF8, GPIO_PF9, +# if defined(CONFIG_AD2S1210_GPIO_INPUT) || defined(CONFIG_AD2S1210_GPIO_OUTPUT) + /* the RES0 and RES1 pins */ + GPIO_PF4, GPIO_PF5, +# endif + 0, +}; + +static struct bfin5xx_spi_chip ad2s1210_spi_chip_info = { + .enable_dma = 0, + .bits_per_word = 8, +}; +#endif + #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) #define MMC_SPI_CARD_DETECT_INT IRQ_PF5 @@ -1010,6 +1027,17 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = { }, #endif +#if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE) + { + .modalias = "ad2s1210", + .max_speed_hz = 8192000, + .bus_num = 0, + .chip_select = 4, /* CS, change it for your board */ + .platform_data = ad2s1210_platform_data, + .controller_data = &ad2s1210_spi_chip_info, + }, +#endif + #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) { .modalias = "mmc_spi", -- cgit v1.2.3 From 6ba255f4a8a0785ea8a2b052837a7b91eeac1bb4 Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Tue, 8 Dec 2009 11:34:07 +0000 Subject: Blackfin: bf537-stamp: add example ADXL346 orientation resources 
Signed-off-by: Michael Hennerich Signed-off-by: Mike Frysinger --- arch/blackfin/mach-bf537/boards/stamp.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 772b7bab0a9b..9eaf5b05c11e 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -734,11 +734,11 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = { .y_plate_ohms = 486, .pressure_max = 1000, .pressure_min = 0, - .stopacq_polarity = 1, - .first_conversion_delay = 3, - .acquisition_time = 1, - .averaging = 1, - .pen_down_acc_interval = 1, + .stopacq_polarity = 1, + .first_conversion_delay = 3, + .acquisition_time = 1, + .averaging = 1, + .pen_down_acc_interval = 1, }; #endif @@ -749,11 +749,11 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = { .x_plate_ohms = 620, /* 620 Ohm from the touch datasheet */ .pressure_max = 10000, .pressure_min = 0, - .first_conversion_delay = 3, /* wait 512us before do a first conversion */ - .acquisition_time = 1, /* 4us acquisition time per sample */ + .first_conversion_delay = 3, /* wait 512us before do a first conversion */ + .acquisition_time = 1, /* 4us acquisition time per sample */ .median = 2, /* do 8 measurements */ - .averaging = 1, /* take the average of 4 middle samples */ - .pen_down_acc_interval = 255, /* 9.4 ms */ + .averaging = 1, /* take the average of 4 middle samples */ + .pen_down_acc_interval = 255, /* 9.4 ms */ .gpio_export = 1, /* Export GPIO to gpiolib */ .gpio_base = -1, /* Dynamic allocation */ }; @@ -790,6 +790,11 @@ static const struct adxl34x_platform_data adxl34x_info = { /* .ev_code_act_inactivity = KEY_A,*/ /* EV_KEY */ .power_mode = ADXL_AUTO_SLEEP | ADXL_LINK, .fifo_mode = ADXL_FIFO_STREAM, + .orientation_enable = ADXL_EN_ORIENTATION_3D, + .deadzone_angle = ADXL_DEADZONE_ANGLE_10p8, + .divisor_length = ADXL_LP_FILTER_DIVISOR_16, + /* EV_KEY {+Z, +Y, +X, -X, -Y, -Z} */ + .ev_codes_orient_3d = {BTN_Z, BTN_Y, BTN_X, BTN_A, BTN_B, BTN_C}, }; #endif -- cgit v1.2.3 From 2943bff69e3728cfd69841537120125f9373bb28 Mon Sep 17 00:00:00 2001 From: Robin Getz Date: Thu, 25 Feb 2010 18:16:52 +0000 Subject: Blackfin: fix anomaly 283 handling with exact hardware error The exact hardware error handling code was added before the workaround for anomaly 283 which caused the anomaly to be triggered in some cases (an infinite core stall). So re-order the code to avoid this. Reported-by: Andrew Rook Signed-off-by: Robin Getz Signed-off-by: Mike Frysinger --- arch/blackfin/mach-common/entry.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S index 6c20044c7f4c..a5847f5d67c7 100644 --- a/arch/blackfin/mach-common/entry.S +++ b/arch/blackfin/mach-common/entry.S @@ -482,6 +482,8 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/ [--sp] = ASTAT; [--sp] = (R7:6,P5:4); + ANOMALY_283_315_WORKAROUND(p5, r7) + #ifdef CONFIG_EXACT_HWERR /* Make sure all pending read/writes complete. 
This will ensure any * accesses which could cause hardware errors completes, and signal @@ -492,8 +494,6 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/ ssync; #endif - ANOMALY_283_315_WORKAROUND(p5, r7) - #ifdef CONFIG_DEBUG_DOUBLEFAULT /* * Save these registers, as they are only valid in exception context -- cgit v1.2.3 From f2b0cd61cf635cbdc110472f6edc97328322cb8b Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Thu, 4 Mar 2010 07:35:30 -0500 Subject: Blackfin: eat spurious space in asm/dpmc.h Signed-off-by: Mike Frysinger --- arch/blackfin/include/asm/dpmc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/blackfin/include/asm/dpmc.h b/arch/blackfin/include/asm/dpmc.h index 1597ae5041ee..efcc3aebeae4 100644 --- a/arch/blackfin/include/asm/dpmc.h +++ b/arch/blackfin/include/asm/dpmc.h @@ -75,7 +75,7 @@ #define VLEV 0x00F0 /* Internal Voltage Level */ #ifdef __ADSPBF52x__ -#define VLEV_085 0x0040 /* VLEV = 0.85 V (-5% - +10% Accuracy) */ +#define VLEV_085 0x0040 /* VLEV = 0.85 V (-5% - +10% Accuracy) */ #define VLEV_090 0x0050 /* VLEV = 0.90 V (-5% - +10% Accuracy) */ #define VLEV_095 0x0060 /* VLEV = 0.95 V (-5% - +10% Accuracy) */ #define VLEV_100 0x0070 /* VLEV = 1.00 V (-5% - +10% Accuracy) */ @@ -84,7 +84,7 @@ #define VLEV_115 0x00A0 /* VLEV = 1.15 V (-5% - +10% Accuracy) */ #define VLEV_120 0x00B0 /* VLEV = 1.20 V (-5% - +10% Accuracy) */ #else -#define VLEV_085 0x0060 /* VLEV = 0.85 V (-5% - +10% Accuracy) */ +#define VLEV_085 0x0060 /* VLEV = 0.85 V (-5% - +10% Accuracy) */ #define VLEV_090 0x0070 /* VLEV = 0.90 V (-5% - +10% Accuracy) */ #define VLEV_095 0x0080 /* VLEV = 0.95 V (-5% - +10% Accuracy) */ #define VLEV_100 0x0090 /* VLEV = 1.00 V (-5% - +10% Accuracy) */ -- cgit v1.2.3 From 7998a8787a8b35fede689c2bb716f65b231e7492 Mon Sep 17 00:00:00 2001 From: Graf Yang Date: Mon, 8 Mar 2010 03:01:35 +0000 Subject: Blackfin: scale calibration when cpu freq changes Need to make sure we update the loops_per_jiffy values when we start changing the core clock. 
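In outline, the change captures loops_per_jiffy and the corresponding frequency once as a reference pair, then rescales linearly on every transition. A simplified sketch of the idea (the helper name is illustrative only; the actual patch open-codes this in bfin_target()):

        static unsigned long lpj_ref;          /* loops_per_jiffy at lpj_ref_freq */
        static unsigned int lpj_ref_freq;      /* in kHz, captured on the first transition */

        static void bfin_rescale_lpj(unsigned int old_khz, unsigned int new_khz)
        {
                if (!lpj_ref_freq) {
                        lpj_ref = loops_per_jiffy;
                        lpj_ref_freq = old_khz;
                }
                if (new_khz != old_khz)
                        /* lpj scales linearly with cclk: lpj_ref * new / ref */
                        loops_per_jiffy = cpufreq_scale(lpj_ref, lpj_ref_freq, new_khz);
        }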
Signed-off-by: Graf Yang Signed-off-by: Mike Frysinger --- arch/blackfin/mach-common/cpufreq.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c index 5d7f8ab5509a..4391d03dc845 100644 --- a/arch/blackfin/mach-common/cpufreq.c +++ b/arch/blackfin/mach-common/cpufreq.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -99,13 +100,15 @@ static unsigned int bfin_getfreq_khz(unsigned int cpu) return get_cclk() / 1000; } - static int bfin_target(struct cpufreq_policy *poli, unsigned int target_freq, unsigned int relation) { unsigned int index, plldiv, cpu; unsigned long flags, cclk_hz; struct cpufreq_freqs freqs; + static unsigned long lpj_ref; + static unsigned int lpj_ref_freq; + #if defined(CONFIG_CYCLES_CLOCKSOURCE) cycles_t cycles; #endif @@ -144,6 +147,14 @@ static int bfin_target(struct cpufreq_policy *poli, (cycles << __bfin_cycles_mod) - (cycles << index); __bfin_cycles_mod = index; #endif + if (!lpj_ref_freq) { + lpj_ref = loops_per_jiffy; + lpj_ref_freq = freqs.old; + } + if (freqs.new != freqs.old) { + loops_per_jiffy = cpufreq_scale(lpj_ref, + lpj_ref_freq, freqs.new); + } local_irq_restore_hw(flags); } /* TODO: just test case for cycles clock source, remove later */ -- cgit v1.2.3 From 28918c211d86b6eeb70182c523800c7bc442960c Mon Sep 17 00:00:00 2001 From: Michael Poole Date: Tue, 9 Mar 2010 06:47:35 -0500 Subject: HID: magicmouse: fix oops after device removal Ask the HID core not to register an input device for the mouse. Fix an oops after removing the device, due to leaving the new input device registered. Signed-off-by: Michael Poole Signed-off-by: Jiri Kosina --- drivers/hid/hid-magicmouse.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 4a3a94f2b10c..c174b64c3810 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -353,7 +353,7 @@ static int magicmouse_probe(struct hid_device *hdev, goto err_free; } - ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDINPUT); if (ret) { dev_err(&hdev->dev, "magicmouse hw start failed\n"); goto err_free; @@ -409,8 +409,11 @@ err_free: static void magicmouse_remove(struct hid_device *hdev) { + struct magicmouse_sc *msc = hid_get_drvdata(hdev); + hid_hw_stop(hdev); - kfree(hid_get_drvdata(hdev)); + input_unregister_device(msc->input); + kfree(msc); } static const struct hid_device_id magic_mice[] = { -- cgit v1.2.3 From eff7f270e9a05688066f40589d7b44e1dcf335dc Mon Sep 17 00:00:00 2001 From: Andrej Gelenberg Date: Tue, 9 Mar 2010 13:49:54 +0100 Subject: HID: add quirk for UC-Logik WP4030 tablet Add HID_QUIRK_MULTI_INPUT for UC-Logik tablet. $ lsusb ... Bus 004 Device 002: ID 5543:0003 UC-Logic Technology Corp. 
Genius MousePen 4x3 Tablet/Aquila L1 Tablet Signed-off-by: Andrej Gelenberg Signed-off-by: Jiri Kosina --- drivers/hid/hid-ids.h | 1 + drivers/hid/usbhid/hid-quirks.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 72c05f90553c..797e06470356 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -445,6 +445,7 @@ #define USB_VENDOR_ID_UCLOGIC 0x5543 #define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042 +#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003 #define USB_VENDOR_ID_VERNIER 0x08f7 #define USB_DEVICE_ID_VERNIER_LABPRO 0x0001 diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 7844280897d1..928943c7ce9a 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -63,6 +63,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, -- cgit v1.2.3 From eb63e5d15758d2b1e607ddd5fb861b5596629380 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Tue, 9 Mar 2010 08:41:12 -0500 Subject: Blackfin: stop cleaning include/asm/asm-offsets.h The file is no longer generated, so we don't want to clean it. Reported-by: Vivi Li Signed-off-by: Mike Frysinger --- arch/blackfin/Makefile | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile index ba84206d0554..5a97a31d4bbd 100644 --- a/arch/blackfin/Makefile +++ b/arch/blackfin/Makefile @@ -133,7 +133,6 @@ KBUILD_CFLAGS += -Iarch/$(ARCH)/mach-$(MACHINE)/include KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(machdirs)) CLEAN_FILES += \ - arch/$(ARCH)/include/asm/asm-offsets.h \ arch/$(ARCH)/kernel/asm-offsets.s \ archclean: -- cgit v1.2.3 From abab095d1fd25986b910d3c46289d8fa3582cdc5 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 12 Feb 2010 07:44:18 -0500 Subject: cifs: add cifs_revalidate_file ...to allow updating inode attributes on an existing inode by filehandle. Change mmap and llseek codepaths to use that instead of cifs_revalidate_dentry since they have a filehandle readily available. 
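The shape of the new entry point is simple: skip the round trip when the cached attributes are still considered valid, otherwise issue a handle-based query, choosing the Unix or legacy info call to match the mount. A simplified sketch (the full version in the hunks below also handles invalid_mapping):

        int cifs_revalidate_file(struct file *filp)
        {
                struct inode *inode = filp->f_path.dentry->d_inode;

                if (!cifs_inode_needs_reval(inode))
                        return 0;

                if (CIFS_SB(inode->i_sb)->tcon->unix_ext)
                        return cifs_get_file_info_unix(filp);

                return cifs_get_file_info(filp);
        }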
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 2 +- fs/cifs/cifsfs.h | 1 + fs/cifs/cifsproto.h | 2 ++ fs/cifs/file.c | 3 +- fs/cifs/inode.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 91 insertions(+), 3 deletions(-) diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index cf85a4165b01..5183bc2a1916 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -639,7 +639,7 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin) setting the revalidate time to zero */ CIFS_I(file->f_path.dentry->d_inode)->time = 0; - retval = cifs_revalidate_dentry(file->f_path.dentry); + retval = cifs_revalidate_file(file); if (retval < 0) return (loff_t)retval; } diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 2af995ca8955..7aa57ecdc437 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -61,6 +61,7 @@ extern int cifs_mkdir(struct inode *, struct dentry *, int); extern int cifs_rmdir(struct inode *, struct dentry *); extern int cifs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); +extern int cifs_revalidate_file(struct file *filp); extern int cifs_revalidate_dentry(struct dentry *); extern int cifs_getattr(struct vfsmount *, struct dentry *, struct kstat *); extern int cifs_setattr(struct dentry *, struct iattr *); diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index ce9199f0af9d..39e47f46dea5 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -104,10 +104,12 @@ extern void cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr); extern struct inode *cifs_iget(struct super_block *sb, struct cifs_fattr *fattr); +extern int cifs_get_file_info(struct file *filp); extern int cifs_get_inode_info(struct inode **pinode, const unsigned char *search_path, FILE_ALL_INFO *pfile_info, struct super_block *sb, int xid, const __u16 *pfid); +extern int cifs_get_file_info_unix(struct file *filp); extern int cifs_get_inode_info_unix(struct inode **pinode, const unsigned char *search_path, struct super_block *sb, int xid); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index b90f8f2ca85c..1389f6ecef9e 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1890,11 +1890,10 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) { - struct dentry *dentry = file->f_path.dentry; int rc, xid; xid = GetXid(); - rc = cifs_revalidate_dentry(dentry); + rc = cifs_revalidate_file(file); if (rc) { cFYI(1, ("Validation prior to mmap failed, error=%d", rc)); FreeXid(xid); diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index f050dba920cb..0d034a84bb8d 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -268,6 +268,31 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb) fattr->cf_flags |= CIFS_FATTR_DFS_REFERRAL; } +int cifs_get_file_info_unix(struct file *filp) +{ + int rc; + int xid; + FILE_UNIX_BASIC_INFO find_data; + struct cifs_fattr fattr; + struct inode *inode = filp->f_path.dentry->d_inode; + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); + struct cifsTconInfo *tcon = cifs_sb->tcon; + struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data; + + xid = GetXid(); + rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data); + if (!rc) { + cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb); + } else if (rc == -EREMOTE) { + cifs_create_dfs_fattr(&fattr, inode->i_sb); + rc = 0; + } + + cifs_fattr_to_inode(inode, &fattr); + FreeXid(xid); + return rc; +} + int 
cifs_get_inode_info_unix(struct inode **pinode, const unsigned char *full_path, struct super_block *sb, int xid) @@ -469,6 +494,47 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info, fattr->cf_gid = cifs_sb->mnt_gid; } +int cifs_get_file_info(struct file *filp) +{ + int rc; + int xid; + FILE_ALL_INFO find_data; + struct cifs_fattr fattr; + struct inode *inode = filp->f_path.dentry->d_inode; + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); + struct cifsTconInfo *tcon = cifs_sb->tcon; + struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data; + + xid = GetXid(); + rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data); + if (rc == -EOPNOTSUPP || rc == -EINVAL) { + /* + * FIXME: legacy server -- fall back to path-based call? + * for now, just skip revalidating and mark inode for + * immediate reval. + */ + rc = 0; + CIFS_I(inode)->time = 0; + goto cgfi_exit; + } else if (rc == -EREMOTE) { + cifs_create_dfs_fattr(&fattr, inode->i_sb); + rc = 0; + } else if (rc) + goto cgfi_exit; + + /* + * don't bother with SFU junk here -- just mark inode as needing + * revalidation. + */ + cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false); + fattr.cf_uniqueid = CIFS_I(inode)->uniqueid; + fattr.cf_flags |= CIFS_FATTR_NEED_REVAL; + cifs_fattr_to_inode(inode, &fattr); +cgfi_exit: + FreeXid(xid); + return rc; +} + int cifs_get_inode_info(struct inode **pinode, const unsigned char *full_path, FILE_ALL_INFO *pfindData, struct super_block *sb, int xid, const __u16 *pfid) @@ -1465,6 +1531,26 @@ cifs_invalidate_mapping(struct inode *inode) invalidate_remote_inode(inode); } +int cifs_revalidate_file(struct file *filp) +{ + int rc = 0; + struct inode *inode = filp->f_path.dentry->d_inode; + + if (!cifs_inode_needs_reval(inode)) + goto check_inval; + + if (CIFS_SB(inode->i_sb)->tcon->unix_ext) + rc = cifs_get_file_info_unix(filp); + else + rc = cifs_get_file_info(filp); + +check_inval: + if (CIFS_I(inode)->invalid_mapping) + cifs_invalidate_mapping(inode); + + return rc; +} + /* revalidate a dentry's inode attributes */ int cifs_revalidate_dentry(struct dentry *dentry) { -- cgit v1.2.3 From ff215713eb33c56301cf6bfec0143ddc7f22c138 Mon Sep 17 00:00:00 2001 From: Steve French Date: Tue, 9 Mar 2010 20:30:42 +0000 Subject: [CIFS] checkpatch cleanup Signed-off-by: Steve French --- fs/cifs/file.c | 4 ++-- fs/cifs/inode.c | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 1389f6ecef9e..ca2ba7a0193c 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -219,8 +219,8 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file, cFYI(1, ("inode unchanged on server")); } else { if (file->f_path.dentry->d_inode->i_mapping) { - /* BB no need to lock inode until after invalidate - since namei code should already have it locked? */ + /* BB no need to lock inode until after invalidate + since namei code should already have it locked? */ rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping); if (rc != 0) CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 0d034a84bb8d..723daaccbd0e 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -510,9 +510,9 @@ int cifs_get_file_info(struct file *filp) if (rc == -EOPNOTSUPP || rc == -EINVAL) { /* * FIXME: legacy server -- fall back to path-based call? - * for now, just skip revalidating and mark inode for - * immediate reval. 
- */ + * for now, just skip revalidating and mark inode for + * immediate reval. + */ rc = 0; CIFS_I(inode)->time = 0; goto cgfi_exit; -- cgit v1.2.3 From d2197e1e6038bb01568367a220e4a4e040f8337d Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 9 Mar 2010 14:59:48 -0800 Subject: omap3: Fix compile for Touch Book early_param Commit 2b0d8c251b8876d530a6bf671eb5425838fa698a changed ARM to use the common early_param code. Fix compile for Touch Book accordingly. Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/board-omap3touchbook.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c index 3943d0f8322c..2a5bf5cea883 100644 --- a/arch/arm/mach-omap2/board-omap3touchbook.c +++ b/arch/arm/mach-omap2/board-omap3touchbook.c @@ -518,14 +518,14 @@ static void omap3_touchbook_poweroff(void) gpio_direction_output(TB_KILL_POWER_GPIO, 0); } -static void __init early_touchbook_revision(char **p) +static int __init early_touchbook_revision(char *p) { - if (!*p) - return; + if (!p) + return 0; - strict_strtoul(*p, 10, &touchbook_revision); + return strict_strtoul(p, 10, &touchbook_revision); } -__early_param("tbr=", early_touchbook_revision); +early_param("tbr", early_touchbook_revision); static struct omap_musb_board_data musb_board_data = { .interface_type = MUSB_INTERFACE_ULPI, -- cgit v1.2.3 From 04e82ffb0f02e645f3dde5128ef39d425a8b3c6d Mon Sep 17 00:00:00 2001 From: Peter Huewe Date: Wed, 10 Mar 2010 11:55:05 +0900 Subject: serial: sh-sci: Fix build failure for non-sh architectures. This patch fixes a build failure for various arm based defconfigs [1][2][3] and maybe other architectures/configs. The build failure was introduced by the sh specific patch [4] "serial: sh-sci: Add DMA support" by Guennadi Liakhovetski Patch against linux-next of 20100309 References: [1] http://kisskb.ellerman.id.au/kisskb/buildresult/2248992/ [2] http://kisskb.ellerman.id.au/kisskb/buildresult/2248996/ [3] http://kisskb.ellerman.id.au/kisskb/buildresult/2248998/ [4] http://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git;a=commit;h=73a19e4c0301908ce6346715fd08a74308451f5a Signed-off-by: Peter Huewe Signed-off-by: Paul Mundt --- include/linux/serial_sci.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index 1b177d29a7f0..193d4bfe42ff 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h @@ -2,7 +2,9 @@ #define __LINUX_SERIAL_SCI_H #include +#ifdef CONFIG_SERIAL_SH_SCI_DMA #include +#endif /* * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts) @@ -30,8 +32,10 @@ struct plat_sci_port { upf_t flags; /* UPF_* flags */ char *clk; /* clock string */ struct device *dma_dev; +#ifdef CONFIG_SERIAL_SH_SCI_DMA enum sh_dmae_slave_chan_id dma_slave_tx; enum sh_dmae_slave_chan_id dma_slave_rx; +#endif }; #endif /* __LINUX_SERIAL_SCI_H */ -- cgit v1.2.3 From 089b43f9737f2e51c6ce354749f5a9f3f093601c Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 10 Mar 2010 16:29:48 +0900 Subject: sh: Fix up NUMA build for 29-bit. pmb_bolt_mapping() is undefined on 29-bit builds, so provide a stub. This fixes up the NUMA build on platforms lacking PMB support. 
Signed-off-by: Paul Mundt --- arch/sh/include/asm/mmu.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index 19fe84550b49..56e4418c19b9 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h @@ -66,6 +66,13 @@ int pmb_unmap(void __iomem *addr); #else +static inline int +pmb_bolt_mapping(unsigned long virt, phys_addr_t phys, + unsigned long size, pgprot_t prot) +{ + return -EINVAL; +} + static inline void __iomem * pmb_remap_caller(phys_addr_t phys, unsigned long size, pgprot_t prot, void *caller) -- cgit v1.2.3 From 5b34d1ee1e51d61e779a25d28808e8ad824cea3d Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 10 Mar 2010 16:46:58 +0900 Subject: sh: Export uncached helper symbols. oprofile and others need to get at these, so provide symbol exports. Signed-off-by: Paul Mundt --- arch/sh/mm/uncached.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c index 2ef57efeb225..8a4eca551fc0 100644 --- a/arch/sh/mm/uncached.c +++ b/arch/sh/mm/uncached.c @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -16,11 +17,14 @@ unsigned long cached_to_uncached = SZ_512M; unsigned long uncached_size = SZ_512M; unsigned long uncached_start, uncached_end; +EXPORT_SYMBOL(uncached_start); +EXPORT_SYMBOL(uncached_end); int virt_addr_uncached(unsigned long kaddr) { return (kaddr >= uncached_start) && (kaddr < uncached_end); } +EXPORT_SYMBOL(virt_addr_uncached); void __init uncached_init(void) { -- cgit v1.2.3 From 3f6da3905398826d85731247e7fbcf53400c18bd Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 5 Mar 2010 13:01:18 +0100 Subject: perf: Rework and fix the arch CPU-hotplug hooks Remove the hw_perf_event_*() hotplug hooks in favour of per PMU hotplug notifiers. This has the advantage of reducing the static weak interface as well as exposing all hotplug actions to the PMU. Use this to fix x86 hotplug usage where we did things in ONLINE which should have been done in UP_PREPARE or STARTING. 
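Each PMU now registers a private CPU notifier through the new perf_cpu_notifier() helper instead of relying on the weak hw_perf_event_setup*() hooks. The pattern, roughly (names are placeholders; the powerpc, sh and x86 variants below differ only in which actions they care about):

        static int __cpuinit
        my_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
        {
                unsigned int cpu = (long)hcpu;

                switch (action & ~CPU_TASKS_FROZEN) {
                case CPU_UP_PREPARE:
                        my_pmu_setup(cpu);      /* allocate/reset per-cpu state */
                        break;
                default:
                        break;
                }
                return NOTIFY_OK;
        }

        /* at PMU registration time: */
        perf_cpu_notifier(my_pmu_notifier);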
Signed-off-by: Peter Zijlstra Cc: Paul Mundt Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: <20100305154128.736225361@chello.nl> Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_event.c | 21 +++++++++- arch/sh/kernel/perf_event.c | 20 +++++++++- arch/x86/kernel/cpu/perf_event.c | 70 ++++++++++++++++++++-------------- arch/x86/kernel/cpu/perf_event_amd.c | 60 ++++++++++++----------------- arch/x86/kernel/cpu/perf_event_intel.c | 5 ++- include/linux/perf_event.h | 16 ++++++++ kernel/perf_event.c | 15 -------- 7 files changed, 126 insertions(+), 81 deletions(-) diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 5120bd44f69a..fbe101d7505d 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -1287,7 +1287,7 @@ static void perf_event_interrupt(struct pt_regs *regs) irq_exit(); } -void hw_perf_event_setup(int cpu) +static void power_pmu_setup(int cpu) { struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); @@ -1297,6 +1297,23 @@ void hw_perf_event_setup(int cpu) cpuhw->mmcr[0] = MMCR0_FC; } +static int __cpuinit +power_pmu_notify(struct notifier_block *self, unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + power_pmu_setup(cpu); + break; + + default: + break; + } + + return NOTIFY_OK; +} + int register_power_pmu(struct power_pmu *pmu) { if (ppmu) @@ -1314,5 +1331,7 @@ int register_power_pmu(struct power_pmu *pmu) freeze_events_kernel = MMCR0_FCHV; #endif /* CONFIG_PPC64 */ + perf_cpu_notifier(power_pmu_notifier); + return 0; } diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 7ff0943e7a08..9f253e9cce01 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c @@ -275,13 +275,30 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) return &pmu; } -void hw_perf_event_setup(int cpu) +static void sh_pmu_setup(int cpu) { struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); memset(cpuhw, 0, sizeof(struct cpu_hw_events)); } +static int __cpuinit +sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + sh_pmu_setup(cpu); + break; + + default: + break; + } + + return NOTIFY_OK; +} + void hw_perf_enable(void) { if (!sh_pmu_initialized()) @@ -308,5 +325,6 @@ int register_sh_pmu(struct sh_pmu *pmu) WARN_ON(pmu->num_events > MAX_HWEVENTS); + perf_cpu_notifier(sh_pmu_notifier); return 0; } diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 42aafd11e170..585d5608ae6b 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -157,6 +157,11 @@ struct x86_pmu { void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); struct event_constraint *event_constraints; + + void (*cpu_prepare)(int cpu); + void (*cpu_starting)(int cpu); + void (*cpu_dying)(int cpu); + void (*cpu_dead)(int cpu); }; static struct x86_pmu x86_pmu __read_mostly; @@ -293,7 +298,7 @@ static inline bool bts_available(void) return x86_pmu.enable_bts != NULL; } -static inline void init_debug_store_on_cpu(int cpu) +static void init_debug_store_on_cpu(int cpu) { struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; @@ -305,7 +310,7 @@ static inline void init_debug_store_on_cpu(int cpu) (u32)((u64)(unsigned long)ds 
>> 32)); } -static inline void fini_debug_store_on_cpu(int cpu) +static void fini_debug_store_on_cpu(int cpu) { if (!per_cpu(cpu_hw_events, cpu).ds) return; @@ -1337,6 +1342,39 @@ undo: #include "perf_event_p6.c" #include "perf_event_intel.c" +static int __cpuinit +x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) +{ + unsigned int cpu = (long)hcpu; + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + if (x86_pmu.cpu_prepare) + x86_pmu.cpu_prepare(cpu); + break; + + case CPU_STARTING: + if (x86_pmu.cpu_starting) + x86_pmu.cpu_starting(cpu); + break; + + case CPU_DYING: + if (x86_pmu.cpu_dying) + x86_pmu.cpu_dying(cpu); + break; + + case CPU_DEAD: + if (x86_pmu.cpu_dead) + x86_pmu.cpu_dead(cpu); + break; + + default: + break; + } + + return NOTIFY_OK; +} + static void __init pmu_check_apic(void) { if (cpu_has_apic) @@ -1415,6 +1453,8 @@ void __init init_hw_perf_events(void) pr_info("... max period: %016Lx\n", x86_pmu.max_period); pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed); pr_info("... event mask: %016Lx\n", perf_event_mask); + + perf_cpu_notifier(x86_pmu_notifier); } static inline void x86_pmu_read(struct perf_event *event) @@ -1674,29 +1714,3 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return entry; } - -void hw_perf_event_setup_online(int cpu) -{ - init_debug_store_on_cpu(cpu); - - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_AMD: - amd_pmu_cpu_online(cpu); - break; - default: - return; - } -} - -void hw_perf_event_setup_offline(int cpu) -{ - init_debug_store_on_cpu(cpu); - - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_AMD: - amd_pmu_cpu_offline(cpu); - break; - default: - return; - } -} diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 8f3dbfda3c4f..014528ba7d57 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -271,28 +271,6 @@ done: return &emptyconstraint; } -static __initconst struct x86_pmu amd_pmu = { - .name = "AMD", - .handle_irq = x86_pmu_handle_irq, - .disable_all = x86_pmu_disable_all, - .enable_all = x86_pmu_enable_all, - .enable = x86_pmu_enable_event, - .disable = x86_pmu_disable_event, - .eventsel = MSR_K7_EVNTSEL0, - .perfctr = MSR_K7_PERFCTR0, - .event_map = amd_pmu_event_map, - .raw_event = amd_pmu_raw_event, - .max_events = ARRAY_SIZE(amd_perfmon_event_map), - .num_events = 4, - .event_bits = 48, - .event_mask = (1ULL << 48) - 1, - .apic = 1, - /* use highest bit to detect overflow */ - .max_period = (1ULL << 47) - 1, - .get_event_constraints = amd_get_event_constraints, - .put_event_constraints = amd_put_event_constraints -}; - static struct amd_nb *amd_alloc_nb(int cpu, int nb_id) { struct amd_nb *nb; @@ -378,6 +356,31 @@ static void amd_pmu_cpu_offline(int cpu) raw_spin_unlock(&amd_nb_lock); } +static __initconst struct x86_pmu amd_pmu = { + .name = "AMD", + .handle_irq = x86_pmu_handle_irq, + .disable_all = x86_pmu_disable_all, + .enable_all = x86_pmu_enable_all, + .enable = x86_pmu_enable_event, + .disable = x86_pmu_disable_event, + .eventsel = MSR_K7_EVNTSEL0, + .perfctr = MSR_K7_PERFCTR0, + .event_map = amd_pmu_event_map, + .raw_event = amd_pmu_raw_event, + .max_events = ARRAY_SIZE(amd_perfmon_event_map), + .num_events = 4, + .event_bits = 48, + .event_mask = (1ULL << 48) - 1, + .apic = 1, + /* use highest bit to detect overflow */ + .max_period = (1ULL << 47) - 1, + .get_event_constraints = amd_get_event_constraints, + .put_event_constraints = 
amd_put_event_constraints, + + .cpu_prepare = amd_pmu_cpu_online, + .cpu_dead = amd_pmu_cpu_offline, +}; + static __init int amd_pmu_init(void) { /* Performance-monitoring supported from K7 and later: */ @@ -390,11 +393,6 @@ static __init int amd_pmu_init(void) memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - /* - * explicitly initialize the boot cpu, other cpus will get - * the cpu hotplug callbacks from smp_init() - */ - amd_pmu_cpu_online(smp_processor_id()); return 0; } @@ -405,12 +403,4 @@ static int amd_pmu_init(void) return 0; } -static void amd_pmu_cpu_online(int cpu) -{ -} - -static void amd_pmu_cpu_offline(int cpu) -{ -} - #endif diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 44b60c852107..12e811a7d747 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -870,7 +870,10 @@ static __initconst struct x86_pmu intel_pmu = { .max_period = (1ULL << 31) - 1, .enable_bts = intel_pmu_enable_bts, .disable_bts = intel_pmu_disable_bts, - .get_event_constraints = intel_get_event_constraints + .get_event_constraints = intel_get_event_constraints, + + .cpu_starting = init_debug_store_on_cpu, + .cpu_dying = fini_debug_store_on_cpu, }; static __init int intel_pmu_init(void) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 6f8cd7da1a01..80acbf3d5de1 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -936,5 +936,21 @@ static inline void perf_event_disable(struct perf_event *event) { } #define perf_output_put(handle, x) \ perf_output_copy((handle), &(x), sizeof(x)) +/* + * This has to have a higher priority than migration_notifier in sched.c. + */ +#define perf_cpu_notifier(fn) \ +do { \ + static struct notifier_block fn##_nb __cpuinitdata = \ + { .notifier_call = fn, .priority = 20 }; \ + fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ + (void *)(unsigned long)smp_processor_id()); \ + fn(&fn##_nb, (unsigned long)CPU_STARTING, \ + (void *)(unsigned long)smp_processor_id()); \ + fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ + (void *)(unsigned long)smp_processor_id()); \ + register_cpu_notifier(&fn##_nb); \ +} while (0) + #endif /* __KERNEL__ */ #endif /* _LINUX_PERF_EVENT_H */ diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 4393b9e73740..73329dedb5ad 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -81,10 +81,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event) void __weak hw_perf_disable(void) { barrier(); } void __weak hw_perf_enable(void) { barrier(); } -void __weak hw_perf_event_setup(int cpu) { barrier(); } -void __weak hw_perf_event_setup_online(int cpu) { barrier(); } -void __weak hw_perf_event_setup_offline(int cpu) { barrier(); } - int __weak hw_perf_group_sched_in(struct perf_event *group_leader, struct perf_cpu_context *cpuctx, @@ -5382,8 +5378,6 @@ static void __cpuinit perf_event_init_cpu(int cpu) spin_lock(&perf_resource_lock); cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; spin_unlock(&perf_resource_lock); - - hw_perf_event_setup(cpu); } #ifdef CONFIG_HOTPLUG_CPU @@ -5423,20 +5417,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) perf_event_init_cpu(cpu); break; - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - hw_perf_event_setup_online(cpu); - break; - case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: perf_event_exit_cpu(cpu); break; - case CPU_DEAD: - hw_perf_event_setup_offline(cpu); - break; - default: 
break; } -- cgit v1.2.3 From 32975a4f114be52286f9a5bf6c230dbb8c0e1903 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 6 Mar 2010 19:49:19 +0100 Subject: perf: Optimize perf_disable Currently we always call hw_perf_disable(), even if it's already disabled; this seems superfluous, especially since it cannot be made NMI safe (see further patches). Signed-off-by: Peter Zijlstra Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 73329dedb5ad..d8108465397d 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -93,25 +93,15 @@ void __weak perf_event_print_debug(void) { } static DEFINE_PER_CPU(int, perf_disable_count); -void __perf_disable(void) -{ - __get_cpu_var(perf_disable_count)++; -} - -bool __perf_enable(void) -{ - return !--__get_cpu_var(perf_disable_count); -} - void perf_disable(void) { - __perf_disable(); - hw_perf_disable(); + if (!__get_cpu_var(perf_disable_count)++) + hw_perf_disable(); } void perf_enable(void) { - if (__perf_enable()) + if (!--__get_cpu_var(perf_disable_count)) hw_perf_enable(); } -- cgit v1.2.3 From 3fb2b8ddcc6a7aa62af6bd2cb939edfd4c460506 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 8 Mar 2010 13:51:01 +0100 Subject: perf, x86, Do not use perf_disable from NMI context Explicitly use intel_pmu_{disable,enable}_all() in intel_pmu_handle_irq() to avoid the NMI race conditions in perf_{disable,enable} Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 12e811a7d747..c582449163fa 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -745,11 +745,11 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) cpuc = &__get_cpu_var(cpu_hw_events); - perf_disable(); + intel_pmu_disable_all(); intel_pmu_drain_bts_buffer(); status = intel_pmu_get_status(); if (!status) { - perf_enable(); + intel_pmu_enable_all(); return 0; } @@ -759,8 +759,7 @@ again: WARN_ONCE(1, "perfevents: irq loop stuck!\n"); perf_event_print_debug(); intel_pmu_reset(); - perf_enable(); - return 1; + goto done; } inc_irq_stat(apic_perf_irqs); @@ -790,8 +789,8 @@ again: if (status) goto again; - perf_enable(); - +done: + intel_pmu_enable_all(); return 1; } -- cgit v1.2.3 From 07088edb88164c2a2406cd2d9a7be19d8515214b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Mar 2010 20:16:01 +0100 Subject: perf, x86: Remove superfluous arguments to x86_perf_event_set_period() The second and third argument to x86_perf_event_set_period() are superfluous since they are simple expressions of the first argument. Hence remove them.
Signed-off-by: Peter Zijlstra Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: <20100304140100.006500906@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 15 +++++++-------- arch/x86/kernel/cpu/perf_event_intel.c | 2 +- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 585d5608ae6b..fcf1788f9626 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -170,8 +170,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; -static int x86_perf_event_set_period(struct perf_event *event, - struct hw_perf_event *hwc, int idx); +static int x86_perf_event_set_period(struct perf_event *event); /* * Generalized hw caching related hw_event table, filled @@ -835,7 +834,7 @@ void hw_perf_enable(void) if (hwc->idx == -1) { x86_assign_hw_event(event, cpuc, i); - x86_perf_event_set_period(event, hwc, hwc->idx); + x86_perf_event_set_period(event); } /* * need to mark as active because x86_pmu_disable() @@ -876,12 +875,12 @@ static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); * To be called with the event disabled in hw: */ static int -x86_perf_event_set_period(struct perf_event *event, - struct hw_perf_event *hwc, int idx) +x86_perf_event_set_period(struct perf_event *event) { + struct hw_perf_event *hwc = &event->hw; s64 left = atomic64_read(&hwc->period_left); s64 period = hwc->sample_period; - int err, ret = 0; + int err, ret = 0, idx = hwc->idx; if (idx == X86_PMC_IDX_FIXED_BTS) return 0; @@ -979,7 +978,7 @@ static int x86_pmu_start(struct perf_event *event) if (hwc->idx == -1) return -EAGAIN; - x86_perf_event_set_period(event, hwc, hwc->idx); + x86_perf_event_set_period(event); x86_pmu.enable(hwc, hwc->idx); return 0; @@ -1123,7 +1122,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) handled = 1; data.period = event->hw.last_period; - if (!x86_perf_event_set_period(event, hwc, idx)) + if (!x86_perf_event_set_period(event)) continue; if (perf_event_overflow(event, 1, &data, regs)) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index c582449163fa..6dbdf91ab342 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -699,7 +699,7 @@ static int intel_pmu_save_and_restart(struct perf_event *event) int ret; x86_perf_event_update(event, hwc, idx); - ret = x86_perf_event_set_period(event, hwc, idx); + ret = x86_perf_event_set_period(event); return ret; } -- cgit v1.2.3 From cc2ad4ba8792b9d4ff893ae3b845d2c5a6206fc9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Mar 2010 20:18:39 +0100 Subject: perf, x86: Remove superfluous arguments to x86_perf_event_update() The second and third argument to x86_perf_event_update() are superfluous since they are simple expressions of the first argument. Hence remove them. 
Signed-off-by: Peter Zijlstra Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: <20100304140100.089468871@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 11 ++++++----- arch/x86/kernel/cpu/perf_event_intel.c | 10 ++-------- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index fcf1788f9626..086127ba580f 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -193,11 +193,12 @@ static u64 __read_mostly hw_cache_event_ids * Returns the delta events processed. */ static u64 -x86_perf_event_update(struct perf_event *event, - struct hw_perf_event *hwc, int idx) +x86_perf_event_update(struct perf_event *event) { + struct hw_perf_event *hwc = &event->hw; int shift = 64 - x86_pmu.event_bits; u64 prev_raw_count, new_raw_count; + int idx = hwc->idx; s64 delta; if (idx == X86_PMC_IDX_FIXED_BTS) @@ -1064,7 +1065,7 @@ static void x86_pmu_stop(struct perf_event *event) * Drain the remaining delta count out of a event * that we are disabling: */ - x86_perf_event_update(event, hwc, idx); + x86_perf_event_update(event); cpuc->events[idx] = NULL; } @@ -1112,7 +1113,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) event = cpuc->events[idx]; hwc = &event->hw; - val = x86_perf_event_update(event, hwc, idx); + val = x86_perf_event_update(event); if (val & (1ULL << (x86_pmu.event_bits - 1))) continue; @@ -1458,7 +1459,7 @@ void __init init_hw_perf_events(void) static inline void x86_pmu_read(struct perf_event *event) { - x86_perf_event_update(event, &event->hw, event->hw.idx); + x86_perf_event_update(event); } static const struct pmu pmu = { diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 6dbdf91ab342..a4c9f160448e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -694,14 +694,8 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) */ static int intel_pmu_save_and_restart(struct perf_event *event) { - struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; - int ret; - - x86_perf_event_update(event, hwc, idx); - ret = x86_perf_event_set_period(event); - - return ret; + x86_perf_event_update(event); + return x86_perf_event_set_period(event); } static void intel_pmu_reset(void) -- cgit v1.2.3 From aff3d91a913c9ae0c2f56b65b27cbd00c7d27ee3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 2 Mar 2010 20:32:08 +0100 Subject: perf, x86: Change x86_pmu.{enable,disable} calling convention Pass the full perf_event into the x86_pmu functions so that those may make use of more than the hw_perf_event, and while doing this, remove the superfluous second argument. 
Signed-off-by: Peter Zijlstra Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: <20100304140100.165166129@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 31 +++++++++++++++---------------- arch/x86/kernel/cpu/perf_event_intel.c | 30 +++++++++++++++++------------- arch/x86/kernel/cpu/perf_event_p6.c | 10 ++++++---- 3 files changed, 38 insertions(+), 33 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 086127ba580f..2dd704fa1299 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -133,8 +133,8 @@ struct x86_pmu { int (*handle_irq)(struct pt_regs *); void (*disable_all)(void); void (*enable_all)(void); - void (*enable)(struct hw_perf_event *, int); - void (*disable)(struct hw_perf_event *, int); + void (*enable)(struct perf_event *); + void (*disable)(struct perf_event *); unsigned eventsel; unsigned perfctr; u64 (*event_map)(int); @@ -845,7 +845,7 @@ void hw_perf_enable(void) set_bit(hwc->idx, cpuc->active_mask); cpuc->events[hwc->idx] = event; - x86_pmu.enable(hwc, hwc->idx); + x86_pmu.enable(event); perf_event_update_userpage(event); } cpuc->n_added = 0; @@ -858,15 +858,16 @@ void hw_perf_enable(void) x86_pmu.enable_all(); } -static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) +static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc) { - (void)checking_wrmsrl(hwc->config_base + idx, + (void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE); } -static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx) +static inline void x86_pmu_disable_event(struct perf_event *event) { - (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); + struct hw_perf_event *hwc = &event->hw; + (void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config); } static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); @@ -927,11 +928,11 @@ x86_perf_event_set_period(struct perf_event *event) return ret; } -static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) +static void x86_pmu_enable_event(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); if (cpuc->enabled) - __x86_pmu_enable_event(hwc, idx); + __x86_pmu_enable_event(&event->hw); } /* @@ -974,13 +975,11 @@ static int x86_pmu_enable(struct perf_event *event) static int x86_pmu_start(struct perf_event *event) { - struct hw_perf_event *hwc = &event->hw; - - if (hwc->idx == -1) + if (event->hw.idx == -1) return -EAGAIN; x86_perf_event_set_period(event); - x86_pmu.enable(hwc, hwc->idx); + x86_pmu.enable(event); return 0; } @@ -994,7 +993,7 @@ static void x86_pmu_unthrottle(struct perf_event *event) cpuc->events[hwc->idx] != event)) return; - x86_pmu.enable(hwc, hwc->idx); + x86_pmu.enable(event); } void perf_event_print_debug(void) @@ -1059,7 +1058,7 @@ static void x86_pmu_stop(struct perf_event *event) * could reenable again: */ clear_bit(idx, cpuc->active_mask); - x86_pmu.disable(hwc, idx); + x86_pmu.disable(event); /* * Drain the remaining delta count out of a event @@ -1127,7 +1126,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) continue; if (perf_event_overflow(event, 1, &data, regs)) - x86_pmu.disable(hwc, idx); + x86_pmu.disable(event); } if (handled) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index a4c9f160448e..a84094897799 100644 --- 
a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -548,9 +548,9 @@ static inline void intel_pmu_ack_status(u64 ack) } static inline void -intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx) +intel_pmu_disable_fixed(struct hw_perf_event *hwc) { - int idx = __idx - X86_PMC_IDX_FIXED; + int idx = hwc->idx - X86_PMC_IDX_FIXED; u64 ctrl_val, mask; mask = 0xfULL << (idx * 4); @@ -621,26 +621,28 @@ static void intel_pmu_drain_bts_buffer(void) } static inline void -intel_pmu_disable_event(struct hw_perf_event *hwc, int idx) +intel_pmu_disable_event(struct perf_event *event) { - if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { + struct hw_perf_event *hwc = &event->hw; + + if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { intel_pmu_disable_bts(); intel_pmu_drain_bts_buffer(); return; } if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { - intel_pmu_disable_fixed(hwc, idx); + intel_pmu_disable_fixed(hwc); return; } - x86_pmu_disable_event(hwc, idx); + x86_pmu_disable_event(event); } static inline void -intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx) +intel_pmu_enable_fixed(struct hw_perf_event *hwc) { - int idx = __idx - X86_PMC_IDX_FIXED; + int idx = hwc->idx - X86_PMC_IDX_FIXED; u64 ctrl_val, bits, mask; int err; @@ -670,9 +672,11 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx) err = checking_wrmsrl(hwc->config_base, ctrl_val); } -static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) +static void intel_pmu_enable_event(struct perf_event *event) { - if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { + struct hw_perf_event *hwc = &event->hw; + + if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { if (!__get_cpu_var(cpu_hw_events).enabled) return; @@ -681,11 +685,11 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) } if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { - intel_pmu_enable_fixed(hwc, idx); + intel_pmu_enable_fixed(hwc); return; } - __x86_pmu_enable_event(hwc, idx); + __x86_pmu_enable_event(hwc); } /* @@ -771,7 +775,7 @@ again: data.period = event->hw.last_period; if (perf_event_overflow(event, 1, &data, regs)) - intel_pmu_disable_event(&event->hw, bit); + intel_pmu_disable_event(event); } intel_pmu_ack_status(ack); diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c index a4e67b99d91c..a330485d14da 100644 --- a/arch/x86/kernel/cpu/perf_event_p6.c +++ b/arch/x86/kernel/cpu/perf_event_p6.c @@ -77,27 +77,29 @@ static void p6_pmu_enable_all(void) } static inline void -p6_pmu_disable_event(struct hw_perf_event *hwc, int idx) +p6_pmu_disable_event(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; u64 val = P6_NOP_EVENT; if (cpuc->enabled) val |= ARCH_PERFMON_EVENTSEL_ENABLE; - (void)checking_wrmsrl(hwc->config_base + idx, val); + (void)checking_wrmsrl(hwc->config_base + hwc->idx, val); } -static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx) +static void p6_pmu_enable_event(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; u64 val; val = hwc->config; if (cpuc->enabled) val |= ARCH_PERFMON_EVENTSEL_ENABLE; - (void)checking_wrmsrl(hwc->config_base + idx, val); + (void)checking_wrmsrl(hwc->config_base + hwc->idx, val); } static __initconst struct x86_pmu p6_pmu = { -- cgit v1.2.3 From 34538ee77b39a12702e0f4c3ed9e8fa2dd5eb92c Mon Sep 17 00:00:00 2001 
From: Peter Zijlstra Date: Tue, 2 Mar 2010 21:16:55 +0100 Subject: perf, x86: Use unlocked bitops There is no concurrency on these variables, so don't use LOCK'ed ops. As to the intel_pmu_handle_irq() status bit clean, nobody uses that so remove it all together. Signed-off-by: Peter Zijlstra Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com Cc: Arnaldo Carvalho de Melo LKML-Reference: <20100304140100.240023029@chello.nl> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 8 ++++---- arch/x86/kernel/cpu/perf_event_amd.c | 2 +- arch/x86/kernel/cpu/perf_event_intel.c | 1 - 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 2dd704fa1299..01b166737424 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -643,7 +643,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (test_bit(hwc->idx, used_mask)) break; - set_bit(hwc->idx, used_mask); + __set_bit(hwc->idx, used_mask); if (assign) assign[i] = hwc->idx; } @@ -692,7 +692,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) if (j == X86_PMC_IDX_MAX) break; - set_bit(j, used_mask); + __set_bit(j, used_mask); if (assign) assign[i] = j; @@ -842,7 +842,7 @@ void hw_perf_enable(void) * clear active_mask and events[] yet it preserves * idx */ - set_bit(hwc->idx, cpuc->active_mask); + __set_bit(hwc->idx, cpuc->active_mask); cpuc->events[hwc->idx] = event; x86_pmu.enable(event); @@ -1057,7 +1057,7 @@ static void x86_pmu_stop(struct perf_event *event) * Must be done before we disable, otherwise the nmi handler * could reenable again: */ - clear_bit(idx, cpuc->active_mask); + __clear_bit(idx, cpuc->active_mask); x86_pmu.disable(event); /* diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 014528ba7d57..573458f1caf2 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -287,7 +287,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id) * initialize all possible NB constraints */ for (i = 0; i < x86_pmu.num_events; i++) { - set_bit(i, nb->event_constraints[i].idxmsk); + __set_bit(i, nb->event_constraints[i].idxmsk); nb->event_constraints[i].weight = 1; } return nb; diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index a84094897799..d87421c3f55b 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -765,7 +765,6 @@ again: for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { struct perf_event *event = cpuc->events[bit]; - clear_bit(bit, (unsigned long *) &status); if (!test_bit(bit, cpuc->active_mask)) continue; -- cgit v1.2.3 From c08053e627d23490a03431285b78b7a5b617fbad Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 6 Mar 2010 13:19:24 +0100 Subject: perf, x86: Fix x86_pmu_start pmu::start should undo pmu::stop, make it so. 
Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 01b166737424..9757b96f15f5 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -785,6 +785,7 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc, hwc->last_tag == cpuc->tags[i]; } +static int x86_pmu_start(struct perf_event *event); static void x86_pmu_stop(struct perf_event *event); void hw_perf_enable(void) @@ -833,20 +834,10 @@ void hw_perf_enable(void) event = cpuc->event_list[i]; hwc = &event->hw; - if (hwc->idx == -1) { + if (hwc->idx == -1) x86_assign_hw_event(event, cpuc, i); - x86_perf_event_set_period(event); - } - /* - * need to mark as active because x86_pmu_disable() - * clear active_mask and events[] yet it preserves - * idx - */ - __set_bit(hwc->idx, cpuc->active_mask); - cpuc->events[hwc->idx] = event; - x86_pmu.enable(event); - perf_event_update_userpage(event); + x86_pmu_start(event); } cpuc->n_added = 0; perf_events_lapic_init(); @@ -975,11 +966,17 @@ static int x86_pmu_enable(struct perf_event *event) static int x86_pmu_start(struct perf_event *event) { - if (event->hw.idx == -1) + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int idx = event->hw.idx; + + if (idx == -1) return -EAGAIN; x86_perf_event_set_period(event); + cpuc->events[idx] = event; + __set_bit(idx, cpuc->active_mask); x86_pmu.enable(event); + perf_event_update_userpage(event); return 0; } -- cgit v1.2.3 From 71e2d2828046133ed985696a02e2e1499ca0bfb8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 8 Mar 2010 17:51:33 +0100 Subject: perf, x86: Avoid double disable on throttle vs ioctl(PERF_IOC_DISABLE) Calling ioctl(PERF_EVENT_IOC_DISABLE) on a throttled counter would result in a double disable; cure this by using x86_pmu_{start,stop} for throttle/unthrottle and teach x86_pmu_stop() to check ->active_mask.
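The guard that makes the second call harmless is the test-and-clear on active_mask, roughly (simplified sketch of x86_pmu_stop() after this patch; see the hunk below for the exact code):

        static void x86_pmu_stop(struct perf_event *event)
        {
                struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
                int idx = event->hw.idx;

                if (!__test_and_clear_bit(idx, cpuc->active_mask))
                        return;         /* already stopped, e.g. by the throttle path */

                x86_pmu.disable(event);
                x86_perf_event_update(event);   /* drain the remaining delta */
                cpuc->events[idx] = NULL;
        }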
Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 20 ++++++-------------- arch/x86/kernel/cpu/perf_event_intel.c | 2 +- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 9757b96f15f5..b68c4fb7a944 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -983,14 +983,8 @@ static int x86_pmu_start(struct perf_event *event) static void x86_pmu_unthrottle(struct perf_event *event) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - struct hw_perf_event *hwc = &event->hw; - - if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX || - cpuc->events[hwc->idx] != event)) - return; - - x86_pmu.enable(event); + int ret = x86_pmu_start(event); + WARN_ON_ONCE(ret); } void perf_event_print_debug(void) @@ -1050,11 +1044,9 @@ static void x86_pmu_stop(struct perf_event *event) struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; - /* - * Must be done before we disable, otherwise the nmi handler - * could reenable again: - */ - __clear_bit(idx, cpuc->active_mask); + if (!__test_and_clear_bit(idx, cpuc->active_mask)) + return; + x86_pmu.disable(event); /* @@ -1123,7 +1115,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) continue; if (perf_event_overflow(event, 1, &data, regs)) - x86_pmu.disable(event); + x86_pmu_stop(event); } if (handled) diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index d87421c3f55b..84bfde64a337 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -774,7 +774,7 @@ again: data.period = event->hw.last_period; if (perf_event_overflow(event, 1, &data, regs)) - intel_pmu_disable_event(event); + x86_pmu_stop(event); } intel_pmu_ack_status(ack); -- cgit v1.2.3 From 356e1f2e0ace2d4b100c8eda9d49b709e8323da5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 6 Mar 2010 13:49:56 +0100 Subject: perf, x86: Properly account n_added Make sure n_added is properly accounted so that we can rely on the value to reflect the number of added counters. This is needed if its going to be used for more than a boolean check. Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index b68c4fb7a944..071c8405debd 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -959,7 +959,7 @@ static int x86_pmu_enable(struct perf_event *event) memcpy(cpuc->assign, assign, n*sizeof(int)); cpuc->n_events = n; - cpuc->n_added = n - n0; + cpuc->n_added += n - n0; return 0; } @@ -1302,7 +1302,7 @@ int hw_perf_group_sched_in(struct perf_event *leader, memcpy(cpuc->assign, assign, n0*sizeof(int)); cpuc->n_events = n0; - cpuc->n_added = n1; + cpuc->n_added += n1; ctx->nr_active += n1; /* -- cgit v1.2.3 From 19925ce778f9fc371b9607625de3bff04c60121e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 6 Mar 2010 13:20:40 +0100 Subject: perf, x86: Fix double disable calls hw_perf_enable() would disable events that were not yet enabled. 
This causes problems with code that assumes that ->enable/->disable calls are balanced (like the LBR code does). What happens is that we disable newly added counters that match their previous assignment, even though they are not yet programmed on the hardware. Avoid this by only doing the first pass over the existing events. Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 071c8405debd..045cc0bb4c17 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -802,6 +802,7 @@ void hw_perf_enable(void) return; if (cpuc->n_added) { + int n_running = cpuc->n_events - cpuc->n_added; /* * apply assignment obtained either from * hw_perf_group_sched_in() or x86_pmu_enable() @@ -809,7 +810,7 @@ void hw_perf_enable(void) * step1: save events moving to new counters * step2: reprogram moved events into new counters */ - for (i = 0; i < cpuc->n_events; i++) { + for (i = 0; i < n_running; i++) { event = cpuc->event_list[i]; hwc = &event->hw; -- cgit v1.2.3 From f3d46b2e6fa57547f9884330798792afc83f4b04 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 6 Mar 2010 13:24:58 +0100 Subject: perf, x86: Fix double enable calls hw_perf_enable() would enable already enabled events. This causes problems with code that assumes that ->enable/->disable calls are balanced (like the LBR code does). What happens is that events that were already running and left in place would get enabled again. Avoid this by only enabling new events that match their previous assignment. Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 045cc0bb4c17..1d665a0b202c 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -835,6 +835,10 @@ void hw_perf_enable(void) event = cpuc->event_list[i]; hwc = &event->hw; + if (i < n_running && + match_prev_assignment(hwc, cpuc, i)) + continue; + if (hwc->idx == -1) x86_assign_hw_event(event, cpuc, i); -- cgit v1.2.3 From d4944a06666054707d23e11888e480af239e5abf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 8 Mar 2010 13:51:20 +0100 Subject: perf: Provide better condition for event rotation Try to avoid useless rotation and PMU disables. 
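[ Editor's sketch of the rotate condition introduced in the hunks below: a context only needs rotation when it has events that could not all be put on the hardware, i.e. nr_events != nr_active. Plain-C illustration with invented struct and function names, not the kernel's types. ]

#include <stdio.h>
#include <stddef.h>

struct ctx {
	int nr_events;	/* events attached to the context     */
	int nr_active;	/* events currently on the hardware   */
};

/* Mirror of the decision: rotate only if some context is overcommitted. */
static int needs_rotation(const struct ctx *cpu_ctx, const struct ctx *task_ctx)
{
	if (cpu_ctx->nr_events && cpu_ctx->nr_events != cpu_ctx->nr_active)
		return 1;
	if (task_ctx && task_ctx->nr_events && task_ctx->nr_events != task_ctx->nr_active)
		return 1;
	return 0;
}

int main(void)
{
	struct ctx cpu  = { .nr_events = 4, .nr_active = 2 };	/* overcommitted   */
	struct ctx task = { .nr_events = 1, .nr_active = 1 };	/* fully scheduled */

	printf("rotate: %d\n", needs_rotation(&cpu, &task));	/* prints 1 */
	return 0;
}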
[ Could be improved by keeping a nr_runnable count to better account for the < PERF_STAT_INACTIVE counters ] Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: paulus@samba.org Cc: eranian@google.com Cc: robert.richter@amd.com Cc: fweisbec@gmail.com LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index d8108465397d..52c69a34d697 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -1524,12 +1524,15 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) */ if (interrupts == MAX_INTERRUPTS) { perf_log_throttle(event, 1); + perf_disable(); event->pmu->unthrottle(event); + perf_enable(); } if (!event->attr.freq || !event->attr.sample_freq) continue; + perf_disable(); event->pmu->read(event); now = atomic64_read(&event->count); delta = now - hwc->freq_count_stamp; @@ -1537,6 +1540,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) if (delta > 0) perf_adjust_period(event, TICK_NSEC, delta); + perf_enable(); } raw_spin_unlock(&ctx->lock); } @@ -1546,9 +1550,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) */ static void rotate_ctx(struct perf_event_context *ctx) { - if (!ctx->nr_events) - return; - raw_spin_lock(&ctx->lock); /* Rotate the first entry last of non-pinned groups */ @@ -1561,19 +1562,28 @@ void perf_event_task_tick(struct task_struct *curr) { struct perf_cpu_context *cpuctx; struct perf_event_context *ctx; + int rotate = 0; if (!atomic_read(&nr_events)) return; cpuctx = &__get_cpu_var(perf_cpu_context); - ctx = curr->perf_event_ctxp; + if (cpuctx->ctx.nr_events && + cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) + rotate = 1; - perf_disable(); + ctx = curr->perf_event_ctxp; + if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active) + rotate = 1; perf_ctx_adjust_freq(&cpuctx->ctx); if (ctx) perf_ctx_adjust_freq(ctx); + if (!rotate) + return; + + perf_disable(); cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); if (ctx) task_ctx_sched_out(ctx, EVENT_FLEXIBLE); @@ -1585,7 +1595,6 @@ void perf_event_task_tick(struct task_struct *curr) cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE); if (ctx) task_ctx_sched_in(curr, EVENT_FLEXIBLE); - perf_enable(); } -- cgit v1.2.3 From 1224550969e0bf18785786a1a9f801cd86d68586 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 5 Mar 2010 11:54:02 -0300 Subject: perf tools: Don't throw away old map slices not overlapped by new maps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Arnaldo Carvalho de Melo Cc: David S. 
Miller Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1267800842-22324-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/thread.c | 35 ++++++++++++++++++++++++++++++++--- 1 file changed, 32 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 21b92162282b..9024fa1ff5c2 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -183,8 +183,8 @@ struct thread *perf_session__findnew(struct perf_session *self, pid_t pid) return th; } -static void map_groups__remove_overlappings(struct map_groups *self, - struct map *map) +static int map_groups__fixup_overlappings(struct map_groups *self, + struct map *map) { struct rb_root *root = &self->maps[map->type]; struct rb_node *next = rb_first(root); @@ -209,7 +209,36 @@ static void map_groups__remove_overlappings(struct map_groups *self, * list. */ list_add_tail(&pos->node, &self->removed_maps[map->type]); + /* + * Now check if we need to create new maps for areas not + * overlapped by the new map: + */ + if (map->start > pos->start) { + struct map *before = map__clone(pos); + + if (before == NULL) + return -ENOMEM; + + before->end = map->start - 1; + map_groups__insert(self, before); + if (verbose >= 2) + map__fprintf(before, stderr); + } + + if (map->end < pos->end) { + struct map *after = map__clone(pos); + + if (after == NULL) + return -ENOMEM; + + after->start = map->end + 1; + map_groups__insert(self, after); + if (verbose >= 2) + map__fprintf(after, stderr); + } } + + return 0; } void maps__insert(struct rb_root *maps, struct map *map) @@ -254,7 +283,7 @@ struct map *maps__find(struct rb_root *maps, u64 ip) void thread__insert_map(struct thread *self, struct map *map) { - map_groups__remove_overlappings(&self->mg, map); + map_groups__fixup_overlappings(&self->mg, map); map_groups__insert(&self->mg, map); } -- cgit v1.2.3 From accd3cc45a0e1d11090ea66888405987de77bdca Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 5 Mar 2010 12:51:04 -0300 Subject: perf probe: Add missing variable initialization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit cc1: warnings being treated as errors util/probe-finder.c: In function 'find_line_range': util/probe-finder.c:172: warning: 'src' may be used uninitialized in this function make: *** [util/probe-finder.o] Error 1 Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Masami Hiramatsu Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1267804269-22660-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/probe-finder.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index e77dc886760e..1e6c65ebbd80 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -169,7 +169,7 @@ static const char *cu_find_realpath(Dwarf_Die *cu_die, const char *fname) { Dwarf_Files *files; size_t nfiles, i; - const char *src; + const char *src = NULL; int ret; if (!fname) -- cgit v1.2.3 From 8907fd607b66e36636469a2de9833db643869db8 Mon Sep 17 00:00:00 2001 From: Eric B Munson Date: Fri, 5 Mar 2010 12:51:05 -0300 Subject: perf record: Add ID and to recorded event data when recording multiple events Currently perf record does not write the ID or the to disk for events. 
This doesn't allow report to tell if an event stream contains one or more types of events. This patch adds this entry to the list of data that record will write to disk if more than one event was requested. Signed-off-by: Eric B Munson Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1267804269-22660-2-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 771533ced6a8..f573bbb83572 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -244,6 +244,9 @@ static void create_counter(int counter, int cpu, pid_t pid) attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID; + if (nr_counters > 1) + attr->sample_type |= PERF_SAMPLE_ID; + if (freq) { attr->sample_type |= PERF_SAMPLE_PERIOD; attr->freq = 1; -- cgit v1.2.3 From d403d0acc9c5afa679a3f61e71489530d7fa0606 Mon Sep 17 00:00:00 2001 From: Eric B Munson Date: Fri, 5 Mar 2010 12:51:06 -0300 Subject: perf session: Change add_hist_entry to take the tree root instead of session In order to minimize the impact of storing multiple events in a report this function will now take the root of the histogram tree so that the logic for selecting the proper tree can be inserted before the call. Signed-off-by: Eric B Munson Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1267804269-22660-3-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 2 +- tools/perf/builtin-diff.c | 3 ++- tools/perf/builtin-report.c | 3 ++- tools/perf/util/hist.c | 6 +++--- tools/perf/util/hist.h | 3 ++- 5 files changed, 10 insertions(+), 7 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 5ec5de995872..4b734c731e27 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -116,7 +116,7 @@ static int perf_session__add_hist_entry(struct perf_session *self, return 0; } - he = __perf_session__add_hist_entry(self, al, NULL, count, &hit); + he = __perf_session__add_hist_entry(&self->hists, al, NULL, count, &hit); if (he == NULL) return -ENOMEM; diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 18b3f505f9db..20df7352629b 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -26,7 +26,8 @@ static int perf_session__add_hist_entry(struct perf_session *self, struct addr_location *al, u64 count) { bool hit; - struct hist_entry *he = __perf_session__add_hist_entry(self, al, NULL, + struct hist_entry *he = __perf_session__add_hist_entry(&self->hists, + al, NULL, count, &hit); if (he == NULL) return -ENOMEM; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index cfc655d40bb7..cd16e6a7d6d0 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -56,7 +56,8 @@ static int perf_session__add_hist_entry(struct perf_session *self, if ((sort__has_parent || symbol_conf.use_callchain) && chain) syms = perf_session__resolve_callchain(self, al->thread, chain, &parent); - he = __perf_session__add_hist_entry(self, al, parent, count, &hit); + he = __perf_session__add_hist_entry(&self->hists, al, parent, + count, &hit); if (he == NULL) return -ENOMEM; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index e8daf5ca6fd2..55dd9115d1b4 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -12,12 +12,12 @@ struct callchain_param 
callchain_param = { * histogram, sorted on item, collects counts */ -struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self, +struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists, struct addr_location *al, struct symbol *sym_parent, u64 count, bool *hit) { - struct rb_node **p = &self->hists.rb_node; + struct rb_node **p = &hists->rb_node; struct rb_node *parent = NULL; struct hist_entry *he; struct hist_entry entry = { @@ -53,7 +53,7 @@ struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self, return NULL; *he = entry; rb_link_node(&he->rb_node, parent, p); - rb_insert_color(&he->rb_node, &self->hists); + rb_insert_color(&he->rb_node, hists); *hit = false; return he; } diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index e5f99b24048b..7b48590c3ee8 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -10,8 +10,9 @@ struct perf_session; struct hist_entry; struct addr_location; struct symbol; +struct rb_root; -struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self, +struct hist_entry *__perf_session__add_hist_entry(struct rb_root *hists, struct addr_location *al, struct symbol *parent, u64 count, bool *hit); -- cgit v1.2.3 From cb8f09393646c5058056db771583c86e0ed1d92f Mon Sep 17 00:00:00 2001 From: Eric B Munson Date: Fri, 5 Mar 2010 12:51:07 -0300 Subject: perf session: Add storage for separating event types in report This patch adds the structures necessary to count each event type independently in perf report. Signed-off-by: Eric B Munson Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1267804269-22660-4-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/event.h | 9 +++++++++ tools/perf/util/session.c | 1 + tools/perf/util/session.h | 1 + 3 files changed, 11 insertions(+) diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 50a7132887f5..a33b94952e34 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -99,6 +99,15 @@ struct events_stats { u64 lost; }; +struct event_stat_id { + struct rb_node rb_node; + struct rb_root hists; + struct events_stats stats; + u64 config; + u64 event_stream; + u32 type; +}; + void event__print_totals(void); struct perf_session; diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 0de7258e70a5..eed1cb889008 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -70,6 +70,7 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc memcpy(self->filename, filename, len); self->threads = RB_ROOT; + self->stats_by_id = RB_ROOT; self->last_match = NULL; self->mmap_window = 32; self->cwd = NULL; diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 31950fcd8a4d..5c33417eebb3 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -20,6 +20,7 @@ struct perf_session { struct thread *last_match; struct map *vmlinux_maps[MAP__NR_TYPES]; struct events_stats events_stats; + struct rb_root stats_by_id; unsigned long event_total[PERF_RECORD_MAX]; unsigned long unknown_events; struct rb_root hists; -- cgit v1.2.3 From eefc465cdd49cb89a742083fac2807c718ddad31 Mon Sep 17 00:00:00 2001 From: Eric B Munson Date: Fri, 5 Mar 2010 12:51:08 -0300 Subject: perf session: Change perf_session post processing functions to take histogram tree Now that report can store histograms for multiple events we need to be able to do the post processing work for each histogram. 
This patch changes the post processing functions so that they can be called individually for each event's histogram. Signed-off-by: Eric B Munson [ Guarantee bisectabilty by fixing up builtin-report.c ] Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1267804269-22660-5-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-annotate.c | 4 ++-- tools/perf/builtin-diff.c | 10 ++++++---- tools/perf/builtin-report.c | 8 +++++--- tools/perf/util/hist.c | 39 ++++++++++++++++++++------------------- tools/perf/util/hist.h | 9 +++++---- 5 files changed, 38 insertions(+), 32 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 4b734c731e27..6ad7148451c5 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -564,8 +564,8 @@ static int __cmd_annotate(void) if (verbose > 2) dsos__fprintf(stdout); - perf_session__collapse_resort(session); - perf_session__output_resort(session, session->event_total[0]); + perf_session__collapse_resort(&session->hists); + perf_session__output_resort(&session->hists, session->event_total[0]); perf_session__find_annotations(session); out_delete: perf_session__delete(session); diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 20df7352629b..1ea15d8aeed1 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -115,7 +115,7 @@ static void perf_session__resort_hist_entries(struct perf_session *self) static void perf_session__set_hist_entries_positions(struct perf_session *self) { - perf_session__output_resort(self, self->events_stats.total); + perf_session__output_resort(&self->hists, self->events_stats.total); perf_session__resort_hist_entries(self); } @@ -167,13 +167,15 @@ static int __cmd_diff(void) goto out_delete; } - perf_session__output_resort(session[1], session[1]->events_stats.total); + perf_session__output_resort(&session[1]->hists, + session[1]->events_stats.total); if (show_displacement) perf_session__set_hist_entries_positions(session[0]); perf_session__match_hists(session[0], session[1]); - perf_session__fprintf_hists(session[1], session[0], - show_displacement, stdout); + perf_session__fprintf_hists(&session[1]->hists, session[0], + show_displacement, stdout, + session[1]->events_stats.total); out_delete: for (i = 0; i < 2; ++i) perf_session__delete(session[i]); diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index cd16e6a7d6d0..294b4cf105f2 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -225,10 +225,12 @@ static int __cmd_report(void) if (verbose > 2) dsos__fprintf(stdout); - perf_session__collapse_resort(session); - perf_session__output_resort(session, session->events_stats.total); + perf_session__collapse_resort(&session->hists); + perf_session__output_resort(&session->hists, + session->events_stats.total); fprintf(stdout, "# Samples: %Ld\n#\n", session->events_stats.total); - perf_session__fprintf_hists(session, NULL, false, stdout); + perf_session__fprintf_hists(&session->hists, NULL, false, stdout, + session->events_stats.total); if (sort_order == default_sort_order && parent_pattern == default_parent_pattern) fprintf(stdout, "#\n# (For a higher level overview, try: perf report --sort comm,dso)\n#\n"); diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 55dd9115d1b4..73ebb6fb34ac 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -130,7 +130,7 @@ static void collapse__insert_entry(struct 
rb_root *root, struct hist_entry *he) rb_insert_color(&he->rb_node, root); } -void perf_session__collapse_resort(struct perf_session *self) +void perf_session__collapse_resort(struct rb_root *hists) { struct rb_root tmp; struct rb_node *next; @@ -140,17 +140,17 @@ void perf_session__collapse_resort(struct perf_session *self) return; tmp = RB_ROOT; - next = rb_first(&self->hists); + next = rb_first(hists); while (next) { n = rb_entry(next, struct hist_entry, rb_node); next = rb_next(&n->rb_node); - rb_erase(&n->rb_node, &self->hists); + rb_erase(&n->rb_node, hists); collapse__insert_entry(&tmp, n); } - self->hists = tmp; + *hists = tmp; } /* @@ -183,7 +183,7 @@ static void perf_session__insert_output_hist_entry(struct rb_root *root, rb_insert_color(&he->rb_node, root); } -void perf_session__output_resort(struct perf_session *self, u64 total_samples) +void perf_session__output_resort(struct rb_root *hists, u64 total_samples) { struct rb_root tmp; struct rb_node *next; @@ -194,18 +194,18 @@ void perf_session__output_resort(struct perf_session *self, u64 total_samples) total_samples * (callchain_param.min_percent / 100); tmp = RB_ROOT; - next = rb_first(&self->hists); + next = rb_first(hists); while (next) { n = rb_entry(next, struct hist_entry, rb_node); next = rb_next(&n->rb_node); - rb_erase(&n->rb_node, &self->hists); + rb_erase(&n->rb_node, hists); perf_session__insert_output_hist_entry(&tmp, n, min_callchain_hits); } - self->hists = tmp; + *hists = tmp; } static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) @@ -456,10 +456,10 @@ static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self, } static size_t hist_entry__fprintf(struct hist_entry *self, - struct perf_session *session, struct perf_session *pair_session, bool show_displacement, - long displacement, FILE *fp) + long displacement, FILE *fp, + u64 session_total) { struct sort_entry *se; u64 count, total; @@ -474,7 +474,7 @@ static size_t hist_entry__fprintf(struct hist_entry *self, total = pair_session->events_stats.total; } else { count = self->count; - total = session->events_stats.total; + total = session_total; } if (total) @@ -496,8 +496,8 @@ static size_t hist_entry__fprintf(struct hist_entry *self, if (total > 0) old_percent = (count * 100.0) / total; - if (session->events_stats.total > 0) - new_percent = (self->count * 100.0) / session->events_stats.total; + if (session_total > 0) + new_percent = (self->count * 100.0) / session_total; diff = new_percent - old_percent; @@ -544,16 +544,17 @@ static size_t hist_entry__fprintf(struct hist_entry *self, left_margin -= thread__comm_len(self->thread); } - hist_entry_callchain__fprintf(fp, self, session->events_stats.total, + hist_entry_callchain__fprintf(fp, self, session_total, left_margin); } return ret; } -size_t perf_session__fprintf_hists(struct perf_session *self, +size_t perf_session__fprintf_hists(struct rb_root *hists, struct perf_session *pair, - bool show_displacement, FILE *fp) + bool show_displacement, FILE *fp, + u64 session_total) { struct sort_entry *se; struct rb_node *nd; @@ -641,7 +642,7 @@ size_t perf_session__fprintf_hists(struct perf_session *self, fprintf(fp, "\n#\n"); print_entries: - for (nd = rb_first(&self->hists); nd; nd = rb_next(nd)) { + for (nd = rb_first(hists); nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (show_displacement) { @@ -652,8 +653,8 @@ print_entries: displacement = 0; ++position; } - ret += hist_entry__fprintf(h, self, pair, show_displacement, - 
displacement, fp); + ret += hist_entry__fprintf(h, pair, show_displacement, + displacement, fp, session_total); } free(rem_sq_bracket); diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 7b48590c3ee8..16f360cce5bf 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -20,9 +20,10 @@ extern int64_t hist_entry__cmp(struct hist_entry *, struct hist_entry *); extern int64_t hist_entry__collapse(struct hist_entry *, struct hist_entry *); void hist_entry__free(struct hist_entry *); -void perf_session__output_resort(struct perf_session *self, u64 total_samples); -void perf_session__collapse_resort(struct perf_session *self); -size_t perf_session__fprintf_hists(struct perf_session *self, +void perf_session__output_resort(struct rb_root *hists, u64 total_samples); +void perf_session__collapse_resort(struct rb_root *hists); +size_t perf_session__fprintf_hists(struct rb_root *hists, struct perf_session *pair, - bool show_displacement, FILE *fp); + bool show_displacement, FILE *fp, + u64 session_total); #endif /* __PERF_HIST_H */ -- cgit v1.2.3 From cbbc79a53278b83bf7f834127751459f9299e402 Mon Sep 17 00:00:00 2001 From: Eric B Munson Date: Fri, 5 Mar 2010 12:51:09 -0300 Subject: perf report: Add multiple event support Perf report does not handle multiple events being reported, even though perf record stores them properly on disk. This patch addresses that issue by adding the logic to perf report to use the event stream id that is saved by record and the new data structures to seperate the event streams and report them individually. Signed-off-by: Eric B Munson Signed-off-by: Arnaldo Carvalho de Melo Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1267804269-22660-6-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-report.c | 115 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 100 insertions(+), 15 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 294b4cf105f2..f815de25d0fc 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -45,29 +45,71 @@ static char *pretty_printing_style = default_pretty_printing_style; static char callchain_default_opt[] = "fractal,0.5"; +static struct event_stat_id *get_stats(struct perf_session *self, + u64 event_stream, u32 type, u64 config) +{ + struct rb_node **p = &self->stats_by_id.rb_node; + struct rb_node *parent = NULL; + struct event_stat_id *iter, *new; + + while (*p != NULL) { + parent = *p; + iter = rb_entry(parent, struct event_stat_id, rb_node); + if (iter->config == config) + return iter; + + + if (config > iter->config) + p = &(*p)->rb_right; + else + p = &(*p)->rb_left; + } + + new = malloc(sizeof(struct event_stat_id)); + if (new == NULL) + return NULL; + memset(new, 0, sizeof(struct event_stat_id)); + new->event_stream = event_stream; + new->config = config; + new->type = type; + rb_link_node(&new->rb_node, parent, p); + rb_insert_color(&new->rb_node, &self->stats_by_id); + return new; +} + static int perf_session__add_hist_entry(struct perf_session *self, struct addr_location *al, - struct ip_callchain *chain, u64 count) + struct sample_data *data) { struct symbol **syms = NULL, *parent = NULL; bool hit; struct hist_entry *he; + struct event_stat_id *stats; + struct perf_event_attr *attr; - if ((sort__has_parent || symbol_conf.use_callchain) && chain) + if ((sort__has_parent || symbol_conf.use_callchain) && data->callchain) syms = perf_session__resolve_callchain(self, al->thread, - chain, &parent); - he = 
__perf_session__add_hist_entry(&self->hists, al, parent, - count, &hit); + data->callchain, &parent); + + attr = perf_header__find_attr(data->id, &self->header); + if (attr) + stats = get_stats(self, data->id, attr->type, attr->config); + else + stats = get_stats(self, data->id, 0, 0); + if (stats == NULL) + return -ENOMEM; + he = __perf_session__add_hist_entry(&stats->hists, al, parent, + data->period, &hit); if (he == NULL) return -ENOMEM; if (hit) - he->count += count; + he->count += data->period; if (symbol_conf.use_callchain) { if (!hit) callchain_init(&he->callchain); - append_chain(&he->callchain, chain, syms); + append_chain(&he->callchain, data->callchain, syms); free(syms); } @@ -87,10 +129,30 @@ static int validate_chain(struct ip_callchain *chain, event_t *event) return 0; } +static int add_event_total(struct perf_session *session, + struct sample_data *data, + struct perf_event_attr *attr) +{ + struct event_stat_id *stats; + + if (attr) + stats = get_stats(session, data->id, attr->type, attr->config); + else + stats = get_stats(session, data->id, 0, 0); + + if (!stats) + return -ENOMEM; + + stats->stats.total += data->period; + session->events_stats.total += data->period; + return 0; +} + static int process_sample_event(event_t *event, struct perf_session *session) { struct sample_data data = { .period = 1, }; struct addr_location al; + struct perf_event_attr *attr; event__parse_sample(event, session->sample_type, &data); @@ -124,12 +186,18 @@ static int process_sample_event(event_t *event, struct perf_session *session) if (al.filtered || (hide_unresolved && al.sym == NULL)) return 0; - if (perf_session__add_hist_entry(session, &al, data.callchain, data.period)) { + if (perf_session__add_hist_entry(session, &al, &data)) { pr_debug("problem incrementing symbol count, skipping event\n"); return -1; } - session->events_stats.total += data.period; + attr = perf_header__find_attr(data.id, &session->header); + + if (add_event_total(session, &data, attr)) { + pr_debug("problem adding event count\n"); + return -1; + } + return 0; } @@ -198,6 +266,7 @@ static int __cmd_report(void) { int ret = -EINVAL; struct perf_session *session; + struct rb_node *next; session = perf_session__new(input_name, O_RDONLY, force); if (session == NULL) @@ -225,12 +294,28 @@ static int __cmd_report(void) if (verbose > 2) dsos__fprintf(stdout); - perf_session__collapse_resort(&session->hists); - perf_session__output_resort(&session->hists, - session->events_stats.total); - fprintf(stdout, "# Samples: %Ld\n#\n", session->events_stats.total); - perf_session__fprintf_hists(&session->hists, NULL, false, stdout, - session->events_stats.total); + next = rb_first(&session->stats_by_id); + while (next) { + struct event_stat_id *stats; + + stats = rb_entry(next, struct event_stat_id, rb_node); + perf_session__collapse_resort(&stats->hists); + perf_session__output_resort(&stats->hists, stats->stats.total); + if (rb_first(&session->stats_by_id) == + rb_last(&session->stats_by_id)) + fprintf(stdout, "# Samples: %Ld\n#\n", + stats->stats.total); + else + fprintf(stdout, "# Samples: %Ld %s\n#\n", + stats->stats.total, + __event_name(stats->type, stats->config)); + + perf_session__fprintf_hists(&stats->hists, NULL, false, stdout, + stats->stats.total); + fprintf(stdout, "\n\n"); + next = rb_next(&stats->rb_node); + } + if (sort_order == default_sort_order && parent_pattern == default_parent_pattern) fprintf(stdout, "#\n# (For a higher level overview, try: perf report --sort comm,dso)\n#\n"); -- cgit v1.2.3 From 
65f2ed2b2fa6034ef9890b60c8fd39fbe76b9d37 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 9 Mar 2010 15:58:17 -0300 Subject: perf report: Print the map table just after samples for which no map was found MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If -vv is used just the map table will be printed, -vvv will print the symbol table too, with it we can see that we have a bug where some samples are not being resolved to a map when we get them in the perf.data stream, but after we have it all processed, we can find the right map, some reordering probably is happening. Upcoming patches will provide ways to ask for most PERF_SAMPLE_ conditional samples to be taken for !PERF_RECORD_SAMPLE events too, then we'll be able to ask for PERF_SAMPLE_TIME and PERF_SAMPLE_CPU to help diagnose this. Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras LKML-Reference: <1268161097-17761-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/util/hist.c | 5 +++++ tools/perf/util/thread.c | 6 +++--- tools/perf/util/thread.h | 3 +++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 73ebb6fb34ac..bdcfd6190b21 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -655,6 +655,11 @@ print_entries: } ret += hist_entry__fprintf(h, pair, show_displacement, displacement, fp, session_total); + if (h->map == NULL && verbose > 1) { + __map_groups__fprintf_maps(&h->thread->mg, + MAP__FUNCTION, fp); + fprintf(fp, "%.10s end\n", graph_dotted_line); + } } free(rem_sq_bracket); diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 9024fa1ff5c2..fa968312ee7d 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -79,8 +79,8 @@ int thread__comm_len(struct thread *self) return self->comm_len; } -static size_t __map_groups__fprintf_maps(struct map_groups *self, - enum map_type type, FILE *fp) +size_t __map_groups__fprintf_maps(struct map_groups *self, + enum map_type type, FILE *fp) { size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); struct rb_node *nd; @@ -89,7 +89,7 @@ static size_t __map_groups__fprintf_maps(struct map_groups *self, struct map *pos = rb_entry(nd, struct map, rb_node); printed += fprintf(fp, "Map:"); printed += map__fprintf(pos, fp); - if (verbose > 1) { + if (verbose > 2) { printed += dso__fprintf(pos->dso, type, fp); printed += fprintf(fp, "--\n"); } diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 0a28f39de545..dcf70303e58e 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -10,6 +10,9 @@ struct map_groups { struct list_head removed_maps[MAP__NR_TYPES]; }; +size_t __map_groups__fprintf_maps(struct map_groups *self, + enum map_type type, FILE *fp); + struct thread { struct rb_node rb_node; struct map_groups mg; -- cgit v1.2.3 From db2c4c7791cd04512093d05afc693c3511a65fd7 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 2 Feb 2010 23:34:40 +0100 Subject: lockdep: Move lock events under lockdep recursion protection There are rcu locked read side areas in the path where we submit a trace event. And these rcu_read_(un)lock() trigger lock events, which create recursive events. 
One pair in do_perf_sw_event: __lock_acquire | |--96.11%-- lock_acquire | | | |--27.21%-- do_perf_sw_event | | perf_tp_event | | | | | |--49.62%-- ftrace_profile_lock_release | | | lock_release | | | | | | | |--33.85%-- _raw_spin_unlock Another pair in perf_output_begin/end: __lock_acquire |--23.40%-- perf_output_begin | | __perf_event_overflow | | perf_swevent_overflow | | perf_swevent_add | | perf_swevent_ctx_event | | do_perf_sw_event | | perf_tp_event | | | | | |--55.37%-- ftrace_profile_lock_acquire | | | lock_acquire | | | | | | | |--37.31%-- _raw_spin_lock The problem is not that much the trace recursion itself, as we have a recursion protection already (though it's always wasteful to recurse). But the trace events are outside the lockdep recursion protection, then each lockdep event triggers a lock trace, which will trigger two other lockdep events. Here the recursive lock trace event won't be taken because of the trace recursion, so the recursion stops there but lockdep will still analyse these new events: To sum up, for each lockdep events we have: lock_*() | trace lock_acquire | ----- rcu_read_lock() | | | lock_acquire() | | | trace_lock_acquire() (stopped) | | | lockdep analyze | ----- rcu_read_unlock() | lock_release | trace_lock_release() (stopped) | lockdep analyze And you can repeat the above two times as we have two rcu read side sections when we submit an event. This is fixed in this patch by moving the lock trace event under the lockdep recursion protection. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Paul Mackerras Cc: Hitoshi Mitake Cc: Li Zefan Cc: Lai Jiangshan Cc: Masami Hiramatsu Cc: Jens Axboe --- kernel/lockdep.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 0c30d0455de1..65b5f5b7c298 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -3211,8 +3211,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, { unsigned long flags; - trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); - if (unlikely(current->lockdep_recursion)) return; @@ -3220,6 +3218,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, check_flags(flags); current->lockdep_recursion = 1; + trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); __lock_acquire(lock, subclass, trylock, read, check, irqs_disabled_flags(flags), nest_lock, ip, 0); current->lockdep_recursion = 0; @@ -3232,14 +3231,13 @@ void lock_release(struct lockdep_map *lock, int nested, { unsigned long flags; - trace_lock_release(lock, nested, ip); - if (unlikely(current->lockdep_recursion)) return; raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; + trace_lock_release(lock, nested, ip); __lock_release(lock, nested, ip); current->lockdep_recursion = 0; raw_local_irq_restore(flags); @@ -3413,8 +3411,6 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) { unsigned long flags; - trace_lock_contended(lock, ip); - if (unlikely(!lock_stat)) return; @@ -3424,6 +3420,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip) raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; + trace_lock_contended(lock, ip); __lock_contended(lock, ip); current->lockdep_recursion = 0; raw_local_irq_restore(flags); -- cgit v1.2.3 From 61e67fb9d3ed13e6a7f58652ae4979b9c872fa57 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 3 Mar 2010 07:38:37 +0100 Subject: 
perf/x86-64: Use frame pointer to walk on irq and process stacks We were using the frame pointer based stack walker on every context in x86-32, but not in x86-64 where we only use the seven-league boots on the exception stacks. Use it also on irq and process stacks. This utterly accelerates the captures. Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo --- arch/x86/kernel/dumpstack_64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index d5e2a2ebb627..272c9f1f05f3 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -208,7 +208,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, if (in_irq_stack(stack, irq_stack, irq_stack_end)) { if (ops->stack(data, "IRQ") < 0) break; - bp = print_context_stack(tinfo, stack, bp, + bp = ops->walk_stack(tinfo, stack, bp, ops, data, irq_stack_end, &graph); /* * We link to the next stack (which would be @@ -229,7 +229,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, /* * This handles the process stack: */ - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph); + bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph); put_cpu(); } EXPORT_SYMBOL(dump_trace); -- cgit v1.2.3 From 5331d7b84613b8325362dde53dc2bff2fb87d351 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 4 Mar 2010 21:15:56 +0100 Subject: perf: Introduce new perf_fetch_caller_regs() for hot regs snapshot Events that trigger overflows by interrupting a context can use get_irq_regs() or task_pt_regs() to retrieve the state when the event triggered. But this is not the case for some other class of events like trace events, as tracepoints are executed in the same context as the code that triggered the event. It means we need a different API to capture the regs there, namely we need a hot snapshot to get the most important information for perf: the instruction pointer to get the event origin, the frame pointer for the callchain, the code segment for user_mode() tests (we always use __KERNEL_CS as trace events always occur from the kernel) and the eflags for further purposes. v2: rename perf_save_regs to perf_fetch_caller_regs as per Masami's suggestion. Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Thomas Gleixner Cc: H. 
Peter Anvin Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Cc: Arnaldo Carvalho de Melo Cc: Masami Hiramatsu Cc: Jason Baron Cc: Archs --- arch/x86/kernel/cpu/perf_event.c | 12 ++++++++++++ arch/x86/kernel/dumpstack.h | 15 ++++++++++++++ include/linux/perf_event.h | 42 +++++++++++++++++++++++++++++++++++++++- kernel/perf_event.c | 5 +++++ 4 files changed, 73 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 1d665a0b202c..c6bde7d7afdc 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1707,3 +1707,15 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return entry; } + +void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) +{ + regs->ip = ip; + /* + * perf_arch_fetch_caller_regs adds another call, we need to increment + * the skip level + */ + regs->bp = rewind_frame_pointer(skip + 1); + regs->cs = __KERNEL_CS; + local_save_flags(regs->flags); +} diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h index 4fd1420faffa..29e5f7c845b2 100644 --- a/arch/x86/kernel/dumpstack.h +++ b/arch/x86/kernel/dumpstack.h @@ -29,4 +29,19 @@ struct stack_frame { struct stack_frame *next_frame; unsigned long return_address; }; + +static inline unsigned long rewind_frame_pointer(int n) +{ + struct stack_frame *frame; + + get_bp(frame); + +#ifdef CONFIG_FRAME_POINTER + while (n--) + frame = frame->next_frame; #endif + + return (unsigned long)frame; +} + +#endif /* DUMPSTACK_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 80acbf3d5de1..70cffd052c04 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -452,6 +452,7 @@ enum perf_callchain_context { #include #include #include +#include #include #define PERF_MAX_STACK_DEPTH 255 @@ -847,6 +848,44 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) __perf_sw_event(event_id, nr, nmi, regs, addr); } +extern void +perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip); + +/* + * Take a snapshot of the regs. Skip ip and frame pointer to + * the nth caller. 
We only need a few of the regs: + * - ip for PERF_SAMPLE_IP + * - cs for user_mode() tests + * - bp for callchains + * - eflags, for future purposes, just in case + */ +static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip) +{ + unsigned long ip; + + memset(regs, 0, sizeof(*regs)); + + switch (skip) { + case 1 : + ip = CALLER_ADDR0; + break; + case 2 : + ip = CALLER_ADDR1; + break; + case 3 : + ip = CALLER_ADDR2; + break; + case 4: + ip = CALLER_ADDR3; + break; + /* No need to support further for now */ + default: + ip = 0; + } + + return perf_arch_fetch_caller_regs(regs, ip, skip); +} + extern void __perf_event_mmap(struct vm_area_struct *vma); static inline void perf_event_mmap(struct vm_area_struct *vma) @@ -880,7 +919,8 @@ static inline bool perf_paranoid_kernel(void) } extern void perf_event_init(void); -extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, int entry_size); +extern void perf_tp_event(int event_id, u64 addr, u64 count, void *record, + int entry_size, struct pt_regs *regs); extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 52c69a34d697..359d7f690c2b 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2786,6 +2786,11 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return NULL; } +__weak +void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) +{ +} + /* * Output */ -- cgit v1.2.3 From c530665c31c0140b74ca7689e7f836177796e5bd Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 3 Mar 2010 07:16:16 +0100 Subject: perf: Take a hot regs snapshot for trace events We are taking a wrong regs snapshot when a trace event triggers. Either we use get_irq_regs(), which gives us the interrupted registers if we are in an interrupt, or we use task_pt_regs() which gives us the state before we entered the kernel, assuming we are lucky enough to be no kernel thread, in which case task_pt_regs() returns the initial set of regs when the kernel thread was started. What we want is different. We need a hot snapshot of the regs, so that we can get the instruction pointer to record in the sample, the frame pointer for the callchain, and some other things. Let's use the new perf_fetch_caller_regs() for that. Comparison with perf record -e lock: -R -a -f -g Before: perf [kernel] [k] __do_softirq | --- __do_softirq | |--55.16%-- __open | --44.84%-- __write_nocancel After: perf [kernel] [k] perf_tp_event | --- perf_tp_event | |--41.07%-- lock_acquire | | | |--39.36%-- _raw_spin_lock | | | | | |--7.81%-- hrtimer_interrupt | | | smp_apic_timer_interrupt | | | apic_timer_interrupt The old case was producing unreliable callchains. Now having right frame and instruction pointers, we have the trace we want. Also syscalls and kprobe events already have the right regs, let's use them instead of wasting a retrieval. v2: Follow the rename perf_save_regs() -> perf_fetch_caller_regs() Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Thomas Gleixner Cc: H. 
Peter Anvin Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Cc: Arnaldo Carvalho de Melo Cc: Masami Hiramatsu Cc: Jason Baron Cc: Archs --- include/linux/ftrace_event.h | 7 +++++-- include/trace/ftrace.h | 6 +++++- kernel/perf_event.c | 8 ++------ kernel/trace/trace_event_profile.c | 3 ++- kernel/trace/trace_kprobe.c | 5 +++-- kernel/trace/trace_syscalls.c | 4 ++-- 6 files changed, 19 insertions(+), 14 deletions(-) diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 6b7c444ab8f6..ac424f18ce63 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -187,6 +187,9 @@ do { \ #ifdef CONFIG_PERF_EVENTS struct perf_event; + +DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); + extern int ftrace_profile_enable(int event_id); extern void ftrace_profile_disable(int event_id); extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, @@ -198,11 +201,11 @@ ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp, static inline void ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr, - u64 count, unsigned long irq_flags) + u64 count, unsigned long irq_flags, struct pt_regs *regs) { struct trace_entry *entry = raw_data; - perf_tp_event(entry->type, addr, count, raw_data, size); + perf_tp_event(entry->type, addr, count, raw_data, size, regs); perf_swevent_put_recursion_context(rctx); local_irq_restore(irq_flags); } diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 0804cd594803..f31bb8b9777c 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -764,6 +764,7 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ struct ftrace_raw_##call *entry; \ u64 __addr = 0, __count = 1; \ unsigned long irq_flags; \ + struct pt_regs *__regs; \ int __entry_size; \ int __data_size; \ int rctx; \ @@ -784,8 +785,11 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ \ { assign; } \ \ + __regs = &__get_cpu_var(perf_trace_regs); \ + perf_fetch_caller_regs(__regs, 2); \ + \ ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr, \ - __count, irq_flags); \ + __count, irq_flags, __regs); \ } #undef DEFINE_EVENT diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 359d7f690c2b..45b4b6e55891 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4318,9 +4318,8 @@ static const struct pmu perf_ops_task_clock = { #ifdef CONFIG_EVENT_TRACING void perf_tp_event(int event_id, u64 addr, u64 count, void *record, - int entry_size) + int entry_size, struct pt_regs *regs) { - struct pt_regs *regs = get_irq_regs(); struct perf_sample_data data; struct perf_raw_record raw = { .size = entry_size, @@ -4330,12 +4329,9 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record, perf_sample_data_init(&data, addr); data.raw = &raw; - if (!regs) - regs = task_pt_regs(current); - /* Trace events already protected against recursion */ do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, - &data, regs); + &data, regs); } EXPORT_SYMBOL_GPL(perf_tp_event); diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index f0d693005075..e66d21e15a0f 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c @@ -2,13 +2,14 @@ * trace event based perf counter profiling * * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra - * + * Copyright (C) 2009-2010 Frederic Weisbecker */ #include #include #include "trace.h" +DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); static char *perf_trace_buf; static char 
*perf_trace_buf_nmi; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 505c92273b1a..f7a20a8bfb31 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1240,7 +1240,7 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags); + ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); } /* Kretprobe profile handler */ @@ -1271,7 +1271,8 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags); + ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, + irq_flags, regs); } static int probe_profile_enable(struct ftrace_event_call *call) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index cba47d7935cc..7e6e84fb7b6c 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -467,7 +467,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); + ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); } int prof_sysenter_enable(struct ftrace_event_call *call) @@ -542,7 +542,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); + ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); } int prof_sysexit_enable(struct ftrace_event_call *call) -- cgit v1.2.3 From 97d5a22005f38057b4bc0d95f81cd26510268794 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 5 Mar 2010 05:35:37 +0100 Subject: perf: Drop the obsolete profile naming for trace events Drop the obsolete "profile" naming used by perf for trace events. Perf can now do more than simple events counting, so generalize the API naming. 
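[ Editor's note on the hot-regs snapshot introduced two patches above: perf_fetch_caller_regs() records the caller's instruction pointer and frame pointer at the point where the trace event fires, instead of reusing interrupt or syscall-entry registers. Below is a rough userspace analogue using GCC builtins in place of CALLER_ADDR0 and rewind_frame_pointer(); it is only an illustration of the idea, not the kernel code, and it assumes frame pointers are kept (-fno-omit-frame-pointer). ]

#include <stdio.h>

struct fake_regs {
	unsigned long ip;	/* where the event fired            */
	unsigned long bp;	/* frame pointer for the callchain  */
};

/* Invented helper: capture the *caller's* state, not the helper's own. */
static __attribute__((noinline)) void fetch_caller_regs(struct fake_regs *regs)
{
	regs->ip = (unsigned long)__builtin_return_address(0);
	regs->bp = (unsigned long)__builtin_frame_address(1);
}

static void trace_event(void)
{
	struct fake_regs regs;

	fetch_caller_regs(&regs);
	printf("event at ip=%#lx bp=%#lx\n", regs.ip, regs.bp);
}

int main(void)
{
	trace_event();
	return 0;
}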
Signed-off-by: Frederic Weisbecker Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Steven Rostedt Cc: Masami Hiramatsu Cc: Jason Baron --- include/linux/ftrace_event.h | 16 ++-- include/linux/syscalls.h | 24 +++--- include/trace/ftrace.h | 38 ++++----- include/trace/syscall.h | 8 +- kernel/perf_event.c | 4 +- kernel/trace/Makefile | 2 +- kernel/trace/trace_event_perf.c | 165 +++++++++++++++++++++++++++++++++++++ kernel/trace/trace_event_profile.c | 165 ------------------------------------- kernel/trace/trace_events.c | 2 +- kernel/trace/trace_kprobe.c | 28 +++---- kernel/trace/trace_syscalls.c | 72 ++++++++-------- 11 files changed, 262 insertions(+), 262 deletions(-) create mode 100644 kernel/trace/trace_event_perf.c delete mode 100644 kernel/trace/trace_event_profile.c diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index ac424f18ce63..c0f4b364c711 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -131,12 +131,12 @@ struct ftrace_event_call { void *mod; void *data; - int profile_count; - int (*profile_enable)(struct ftrace_event_call *); - void (*profile_disable)(struct ftrace_event_call *); + int perf_refcount; + int (*perf_event_enable)(struct ftrace_event_call *); + void (*perf_event_disable)(struct ftrace_event_call *); }; -#define FTRACE_MAX_PROFILE_SIZE 2048 +#define PERF_MAX_TRACE_SIZE 2048 #define MAX_FILTER_PRED 32 #define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */ @@ -190,17 +190,17 @@ struct perf_event; DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); -extern int ftrace_profile_enable(int event_id); -extern void ftrace_profile_disable(int event_id); +extern int perf_trace_enable(int event_id); +extern void perf_trace_disable(int event_id); extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str); extern void ftrace_profile_free_filter(struct perf_event *event); extern void * -ftrace_perf_buf_prepare(int size, unsigned short type, int *rctxp, +perf_trace_buf_prepare(int size, unsigned short type, int *rctxp, unsigned long *irq_flags); static inline void -ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr, +perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, u64 count, unsigned long irq_flags, struct pt_regs *regs) { struct trace_entry *entry = raw_data; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 8126f239edf0..51435bcc3460 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -101,18 +101,18 @@ struct perf_event_attr; #ifdef CONFIG_PERF_EVENTS -#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ - .profile_enable = prof_sysenter_enable, \ - .profile_disable = prof_sysenter_disable, +#define TRACE_SYS_ENTER_PERF_INIT(sname) \ + .perf_event_enable = perf_sysenter_enable, \ + .perf_event_disable = perf_sysenter_disable, -#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \ - .profile_enable = prof_sysexit_enable, \ - .profile_disable = prof_sysexit_disable, +#define TRACE_SYS_EXIT_PERF_INIT(sname) \ + .perf_event_enable = perf_sysexit_enable, \ + .perf_event_disable = perf_sysexit_disable, #else -#define TRACE_SYS_ENTER_PROFILE(sname) -#define TRACE_SYS_ENTER_PROFILE_INIT(sname) -#define TRACE_SYS_EXIT_PROFILE(sname) -#define TRACE_SYS_EXIT_PROFILE_INIT(sname) +#define TRACE_SYS_ENTER_PERF(sname) +#define TRACE_SYS_ENTER_PERF_INIT(sname) +#define TRACE_SYS_EXIT_PERF(sname) +#define TRACE_SYS_EXIT_PERF_INIT(sname) #endif /* CONFIG_PERF_EVENTS */ #ifdef CONFIG_FTRACE_SYSCALLS @@ -149,7 +149,7 @@ struct 
perf_event_attr; .regfunc = reg_event_syscall_enter, \ .unregfunc = unreg_event_syscall_enter, \ .data = (void *)&__syscall_meta_##sname,\ - TRACE_SYS_ENTER_PROFILE_INIT(sname) \ + TRACE_SYS_ENTER_PERF_INIT(sname) \ } #define SYSCALL_TRACE_EXIT_EVENT(sname) \ @@ -171,7 +171,7 @@ struct perf_event_attr; .regfunc = reg_event_syscall_exit, \ .unregfunc = unreg_event_syscall_exit, \ .data = (void *)&__syscall_meta_##sname,\ - TRACE_SYS_EXIT_PROFILE_INIT(sname) \ + TRACE_SYS_EXIT_PERF_INIT(sname) \ } #define SYSCALL_METADATA(sname, nb) \ diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index f31bb8b9777c..25ab56f75d65 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h @@ -401,18 +401,18 @@ static inline notrace int ftrace_get_offsets_##call( \ #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) \ \ -static void ftrace_profile_##name(proto); \ +static void perf_trace_##name(proto); \ \ static notrace int \ -ftrace_profile_enable_##name(struct ftrace_event_call *unused) \ +perf_trace_enable_##name(struct ftrace_event_call *unused) \ { \ - return register_trace_##name(ftrace_profile_##name); \ + return register_trace_##name(perf_trace_##name); \ } \ \ static notrace void \ -ftrace_profile_disable_##name(struct ftrace_event_call *unused) \ +perf_trace_disable_##name(struct ftrace_event_call *unused) \ { \ - unregister_trace_##name(ftrace_profile_##name); \ + unregister_trace_##name(perf_trace_##name); \ } #undef DEFINE_EVENT_PRINT @@ -507,12 +507,12 @@ ftrace_profile_disable_##name(struct ftrace_event_call *unused) \ #ifdef CONFIG_PERF_EVENTS -#define _TRACE_PROFILE_INIT(call) \ - .profile_enable = ftrace_profile_enable_##call, \ - .profile_disable = ftrace_profile_disable_##call, +#define _TRACE_PERF_INIT(call) \ + .perf_event_enable = perf_trace_enable_##call, \ + .perf_event_disable = perf_trace_disable_##call, #else -#define _TRACE_PROFILE_INIT(call) +#define _TRACE_PERF_INIT(call) #endif /* CONFIG_PERF_EVENTS */ #undef __entry @@ -638,7 +638,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ .unregfunc = ftrace_raw_unreg_event_##call, \ .print_fmt = print_fmt_##template, \ .define_fields = ftrace_define_fields_##template, \ - _TRACE_PROFILE_INIT(call) \ + _TRACE_PERF_INIT(call) \ } #undef DEFINE_EVENT_PRINT @@ -657,18 +657,18 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ .unregfunc = ftrace_raw_unreg_event_##call, \ .print_fmt = print_fmt_##call, \ .define_fields = ftrace_define_fields_##template, \ - _TRACE_PROFILE_INIT(call) \ + _TRACE_PERF_INIT(call) \ } #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* - * Define the insertion callback to profile events + * Define the insertion callback to perf events * * The job is very similar to ftrace_raw_event_ except that we don't * insert in the ring buffer but in a perf counter. 
* - * static void ftrace_profile_(proto) + * static void ftrace_perf_(proto) * { * struct ftrace_data_offsets_ __maybe_unused __data_offsets; * struct ftrace_event_call *event_call = &event_; @@ -757,7 +757,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \ #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace void \ -ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ +perf_trace_templ_##call(struct ftrace_event_call *event_call, \ proto) \ { \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ @@ -774,10 +774,10 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ sizeof(u64)); \ __entry_size -= sizeof(u32); \ \ - if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \ + if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \ "profile buffer not large enough")) \ return; \ - entry = (struct ftrace_raw_##call *)ftrace_perf_buf_prepare( \ + entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ __entry_size, event_call->id, &rctx, &irq_flags); \ if (!entry) \ return; \ @@ -788,17 +788,17 @@ ftrace_profile_templ_##call(struct ftrace_event_call *event_call, \ __regs = &__get_cpu_var(perf_trace_regs); \ perf_fetch_caller_regs(__regs, 2); \ \ - ftrace_perf_buf_submit(entry, __entry_size, rctx, __addr, \ + perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ __count, irq_flags, __regs); \ } #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ -static notrace void ftrace_profile_##call(proto) \ +static notrace void perf_trace_##call(proto) \ { \ struct ftrace_event_call *event_call = &event_##call; \ \ - ftrace_profile_templ_##template(event_call, args); \ + perf_trace_templ_##template(event_call, args); \ } #undef DEFINE_EVENT_PRINT diff --git a/include/trace/syscall.h b/include/trace/syscall.h index 0387100752f0..e5e5f48dbfb3 100644 --- a/include/trace/syscall.h +++ b/include/trace/syscall.h @@ -47,10 +47,10 @@ enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags); #endif #ifdef CONFIG_PERF_EVENTS -int prof_sysenter_enable(struct ftrace_event_call *call); -void prof_sysenter_disable(struct ftrace_event_call *call); -int prof_sysexit_enable(struct ftrace_event_call *call); -void prof_sysexit_disable(struct ftrace_event_call *call); +int perf_sysenter_enable(struct ftrace_event_call *call); +void perf_sysenter_disable(struct ftrace_event_call *call); +int perf_sysexit_enable(struct ftrace_event_call *call); +void perf_sysexit_disable(struct ftrace_event_call *call); #endif #endif /* _TRACE_SYSCALL_H */ diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 45b4b6e55891..c502b18594cc 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -4347,7 +4347,7 @@ static int perf_tp_event_match(struct perf_event *event, static void tp_perf_event_destroy(struct perf_event *event) { - ftrace_profile_disable(event->attr.config); + perf_trace_disable(event->attr.config); } static const struct pmu *tp_perf_event_init(struct perf_event *event) @@ -4361,7 +4361,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event) !capable(CAP_SYS_ADMIN)) return ERR_PTR(-EPERM); - if (ftrace_profile_enable(event->attr.config)) + if (perf_trace_enable(event->attr.config)) return NULL; event->destroy = tp_perf_event_destroy; diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index d00c6fe23f54..78edc6490038 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) 
+= trace_events.o obj-$(CONFIG_EVENT_TRACING) += trace_export.o obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o ifeq ($(CONFIG_PERF_EVENTS),y) -obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o +obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o endif obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c new file mode 100644 index 000000000000..f315b12a41d8 --- /dev/null +++ b/kernel/trace/trace_event_perf.c @@ -0,0 +1,165 @@ +/* + * trace event based perf event profiling/tracing + * + * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra + * Copyright (C) 2009-2010 Frederic Weisbecker + */ + +#include +#include +#include "trace.h" + +DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); + +static char *perf_trace_buf; +static char *perf_trace_buf_nmi; + +typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ; + +/* Count the events in use (per event id, not per instance) */ +static int total_ref_count; + +static int perf_trace_event_enable(struct ftrace_event_call *event) +{ + char *buf; + int ret = -ENOMEM; + + if (event->perf_refcount++ > 0) + return 0; + + if (!total_ref_count) { + buf = (char *)alloc_percpu(perf_trace_t); + if (!buf) + goto fail_buf; + + rcu_assign_pointer(perf_trace_buf, buf); + + buf = (char *)alloc_percpu(perf_trace_t); + if (!buf) + goto fail_buf_nmi; + + rcu_assign_pointer(perf_trace_buf_nmi, buf); + } + + ret = event->perf_event_enable(event); + if (!ret) { + total_ref_count++; + return 0; + } + +fail_buf_nmi: + if (!total_ref_count) { + free_percpu(perf_trace_buf_nmi); + free_percpu(perf_trace_buf); + perf_trace_buf_nmi = NULL; + perf_trace_buf = NULL; + } +fail_buf: + event->perf_refcount--; + + return ret; +} + +int perf_trace_enable(int event_id) +{ + struct ftrace_event_call *event; + int ret = -EINVAL; + + mutex_lock(&event_mutex); + list_for_each_entry(event, &ftrace_events, list) { + if (event->id == event_id && event->perf_event_enable && + try_module_get(event->mod)) { + ret = perf_trace_event_enable(event); + break; + } + } + mutex_unlock(&event_mutex); + + return ret; +} + +static void perf_trace_event_disable(struct ftrace_event_call *event) +{ + char *buf, *nmi_buf; + + if (--event->perf_refcount > 0) + return; + + event->perf_event_disable(event); + + if (!--total_ref_count) { + buf = perf_trace_buf; + rcu_assign_pointer(perf_trace_buf, NULL); + + nmi_buf = perf_trace_buf_nmi; + rcu_assign_pointer(perf_trace_buf_nmi, NULL); + + /* + * Ensure every events in profiling have finished before + * releasing the buffers + */ + synchronize_sched(); + + free_percpu(buf); + free_percpu(nmi_buf); + } +} + +void perf_trace_disable(int event_id) +{ + struct ftrace_event_call *event; + + mutex_lock(&event_mutex); + list_for_each_entry(event, &ftrace_events, list) { + if (event->id == event_id) { + perf_trace_event_disable(event); + module_put(event->mod); + break; + } + } + mutex_unlock(&event_mutex); +} + +__kprobes void *perf_trace_buf_prepare(int size, unsigned short type, + int *rctxp, unsigned long *irq_flags) +{ + struct trace_entry *entry; + char *trace_buf, *raw_data; + int pc, cpu; + + pc = preempt_count(); + + /* Protect the per cpu buffer, begin the rcu read side */ + local_irq_save(*irq_flags); + + *rctxp = perf_swevent_get_recursion_context(); + if (*rctxp < 0) + goto err_recursion; + + cpu = smp_processor_id(); + + if (in_nmi()) + trace_buf = rcu_dereference(perf_trace_buf_nmi); + else + trace_buf = 
rcu_dereference(perf_trace_buf); + + if (!trace_buf) + goto err; + + raw_data = per_cpu_ptr(trace_buf, cpu); + + /* zero the dead bytes from align to not leak stack to user */ + *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; + + entry = (struct trace_entry *)raw_data; + tracing_generic_entry_update(entry, *irq_flags, pc); + entry->type = type; + + return raw_data; +err: + perf_swevent_put_recursion_context(*rctxp); +err_recursion: + local_irq_restore(*irq_flags); + return NULL; +} +EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c deleted file mode 100644 index e66d21e15a0f..000000000000 --- a/kernel/trace/trace_event_profile.c +++ /dev/null @@ -1,165 +0,0 @@ -/* - * trace event based perf counter profiling - * - * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra - * Copyright (C) 2009-2010 Frederic Weisbecker - */ - -#include -#include -#include "trace.h" - -DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); - -static char *perf_trace_buf; -static char *perf_trace_buf_nmi; - -typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ; - -/* Count the events in use (per event id, not per instance) */ -static int total_profile_count; - -static int ftrace_profile_enable_event(struct ftrace_event_call *event) -{ - char *buf; - int ret = -ENOMEM; - - if (event->profile_count++ > 0) - return 0; - - if (!total_profile_count) { - buf = (char *)alloc_percpu(perf_trace_t); - if (!buf) - goto fail_buf; - - rcu_assign_pointer(perf_trace_buf, buf); - - buf = (char *)alloc_percpu(perf_trace_t); - if (!buf) - goto fail_buf_nmi; - - rcu_assign_pointer(perf_trace_buf_nmi, buf); - } - - ret = event->profile_enable(event); - if (!ret) { - total_profile_count++; - return 0; - } - -fail_buf_nmi: - if (!total_profile_count) { - free_percpu(perf_trace_buf_nmi); - free_percpu(perf_trace_buf); - perf_trace_buf_nmi = NULL; - perf_trace_buf = NULL; - } -fail_buf: - event->profile_count--; - - return ret; -} - -int ftrace_profile_enable(int event_id) -{ - struct ftrace_event_call *event; - int ret = -EINVAL; - - mutex_lock(&event_mutex); - list_for_each_entry(event, &ftrace_events, list) { - if (event->id == event_id && event->profile_enable && - try_module_get(event->mod)) { - ret = ftrace_profile_enable_event(event); - break; - } - } - mutex_unlock(&event_mutex); - - return ret; -} - -static void ftrace_profile_disable_event(struct ftrace_event_call *event) -{ - char *buf, *nmi_buf; - - if (--event->profile_count > 0) - return; - - event->profile_disable(event); - - if (!--total_profile_count) { - buf = perf_trace_buf; - rcu_assign_pointer(perf_trace_buf, NULL); - - nmi_buf = perf_trace_buf_nmi; - rcu_assign_pointer(perf_trace_buf_nmi, NULL); - - /* - * Ensure every events in profiling have finished before - * releasing the buffers - */ - synchronize_sched(); - - free_percpu(buf); - free_percpu(nmi_buf); - } -} - -void ftrace_profile_disable(int event_id) -{ - struct ftrace_event_call *event; - - mutex_lock(&event_mutex); - list_for_each_entry(event, &ftrace_events, list) { - if (event->id == event_id) { - ftrace_profile_disable_event(event); - module_put(event->mod); - break; - } - } - mutex_unlock(&event_mutex); -} - -__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type, - int *rctxp, unsigned long *irq_flags) -{ - struct trace_entry *entry; - char *trace_buf, *raw_data; - int pc, cpu; - - pc = preempt_count(); - - /* Protect the per cpu buffer, begin the rcu read side */ - local_irq_save(*irq_flags); - - *rctxp = 
perf_swevent_get_recursion_context(); - if (*rctxp < 0) - goto err_recursion; - - cpu = smp_processor_id(); - - if (in_nmi()) - trace_buf = rcu_dereference(perf_trace_buf_nmi); - else - trace_buf = rcu_dereference(perf_trace_buf); - - if (!trace_buf) - goto err; - - raw_data = per_cpu_ptr(trace_buf, cpu); - - /* zero the dead bytes from align to not leak stack to user */ - *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; - - entry = (struct trace_entry *)raw_data; - tracing_generic_entry_update(entry, *irq_flags, pc); - entry->type = type; - - return raw_data; -err: - perf_swevent_put_recursion_context(*rctxp); -err_recursion: - local_irq_restore(*irq_flags); - return NULL; -} -EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 3f972ad98d04..beab8bf2f310 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -938,7 +938,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, trace_create_file("enable", 0644, call->dir, call, enable); - if (call->id && call->profile_enable) + if (call->id && call->perf_event_enable) trace_create_file("id", 0444, call->dir, call, id); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index f7a20a8bfb31..1251e367bae9 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1214,7 +1214,7 @@ static int set_print_fmt(struct trace_probe *tp) #ifdef CONFIG_PERF_EVENTS /* Kprobe profile handler */ -static __kprobes void kprobe_profile_func(struct kprobe *kp, +static __kprobes void kprobe_perf_func(struct kprobe *kp, struct pt_regs *regs) { struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); @@ -1227,11 +1227,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp, __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) return; - entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); + entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); if (!entry) return; @@ -1240,11 +1240,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); + perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs); } /* Kretprobe profile handler */ -static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, +static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, struct pt_regs *regs) { struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); @@ -1257,11 +1257,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); size = ALIGN(__size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) return; - entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); + entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags); if (!entry) return; @@ -1271,11 +1271,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, for (i = 0; i < tp->nr_args; i++) entry->args[i] = call_fetch(&tp->args[i].fetch, regs); - 
ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, + perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags, regs); } -static int probe_profile_enable(struct ftrace_event_call *call) +static int probe_perf_enable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; @@ -1287,7 +1287,7 @@ static int probe_profile_enable(struct ftrace_event_call *call) return enable_kprobe(&tp->rp.kp); } -static void probe_profile_disable(struct ftrace_event_call *call) +static void probe_perf_disable(struct ftrace_event_call *call) { struct trace_probe *tp = (struct trace_probe *)call->data; @@ -1312,7 +1312,7 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) kprobe_trace_func(kp, regs); #ifdef CONFIG_PERF_EVENTS if (tp->flags & TP_FLAG_PROFILE) - kprobe_profile_func(kp, regs); + kprobe_perf_func(kp, regs); #endif return 0; /* We don't tweek kernel, so just return 0 */ } @@ -1326,7 +1326,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) kretprobe_trace_func(ri, regs); #ifdef CONFIG_PERF_EVENTS if (tp->flags & TP_FLAG_PROFILE) - kretprobe_profile_func(ri, regs); + kretprobe_perf_func(ri, regs); #endif return 0; /* We don't tweek kernel, so just return 0 */ } @@ -1359,8 +1359,8 @@ static int register_probe_event(struct trace_probe *tp) call->unregfunc = probe_event_disable; #ifdef CONFIG_PERF_EVENTS - call->profile_enable = probe_profile_enable; - call->profile_disable = probe_profile_disable; + call->perf_event_enable = probe_perf_enable; + call->perf_event_disable = probe_perf_disable; #endif call->data = tp; ret = trace_add_event_call(call); diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 7e6e84fb7b6c..33c2a5b769dc 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -428,12 +428,12 @@ core_initcall(init_ftrace_syscalls); #ifdef CONFIG_PERF_EVENTS -static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); -static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls); -static int sys_prof_refcount_enter; -static int sys_prof_refcount_exit; +static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls); +static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls); +static int sys_perf_refcount_enter; +static int sys_perf_refcount_exit; -static void prof_syscall_enter(struct pt_regs *regs, long id) +static void perf_syscall_enter(struct pt_regs *regs, long id) { struct syscall_metadata *sys_data; struct syscall_trace_enter *rec; @@ -443,7 +443,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) int size; syscall_nr = syscall_get_nr(current, regs); - if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) + if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) return; sys_data = syscall_nr_to_meta(syscall_nr); @@ -455,11 +455,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) size = ALIGN(size + sizeof(u32), sizeof(u64)); size -= sizeof(u32); - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, - "profile buffer not large enough")) + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, + "perf buffer not large enough")) return; - rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size, + rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, sys_data->enter_event->id, &rctx, &flags); if (!rec) return; @@ -467,10 +467,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id) rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long 
*)&rec->args); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); + perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs); } -int prof_sysenter_enable(struct ftrace_event_call *call) +int perf_sysenter_enable(struct ftrace_event_call *call) { int ret = 0; int num; @@ -478,34 +478,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call) num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - if (!sys_prof_refcount_enter) - ret = register_trace_sys_enter(prof_syscall_enter); + if (!sys_perf_refcount_enter) + ret = register_trace_sys_enter(perf_syscall_enter); if (ret) { pr_info("event trace: Could not activate" "syscall entry trace point"); } else { - set_bit(num, enabled_prof_enter_syscalls); - sys_prof_refcount_enter++; + set_bit(num, enabled_perf_enter_syscalls); + sys_perf_refcount_enter++; } mutex_unlock(&syscall_trace_lock); return ret; } -void prof_sysenter_disable(struct ftrace_event_call *call) +void perf_sysenter_disable(struct ftrace_event_call *call) { int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - sys_prof_refcount_enter--; - clear_bit(num, enabled_prof_enter_syscalls); - if (!sys_prof_refcount_enter) - unregister_trace_sys_enter(prof_syscall_enter); + sys_perf_refcount_enter--; + clear_bit(num, enabled_perf_enter_syscalls); + if (!sys_perf_refcount_enter) + unregister_trace_sys_enter(perf_syscall_enter); mutex_unlock(&syscall_trace_lock); } -static void prof_syscall_exit(struct pt_regs *regs, long ret) +static void perf_syscall_exit(struct pt_regs *regs, long ret) { struct syscall_metadata *sys_data; struct syscall_trace_exit *rec; @@ -515,7 +515,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) int size; syscall_nr = syscall_get_nr(current, regs); - if (!test_bit(syscall_nr, enabled_prof_exit_syscalls)) + if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) return; sys_data = syscall_nr_to_meta(syscall_nr); @@ -530,11 +530,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) * Impossible, but be paranoid with the future * How to put this check outside runtime? 
*/ - if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, - "exit event has grown above profile buffer size")) + if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, + "exit event has grown above perf buffer size")) return; - rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size, + rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, sys_data->exit_event->id, &rctx, &flags); if (!rec) return; @@ -542,10 +542,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret) rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); - ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags, regs); + perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs); } -int prof_sysexit_enable(struct ftrace_event_call *call) +int perf_sysexit_enable(struct ftrace_event_call *call) { int ret = 0; int num; @@ -553,30 +553,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call) num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - if (!sys_prof_refcount_exit) - ret = register_trace_sys_exit(prof_syscall_exit); + if (!sys_perf_refcount_exit) + ret = register_trace_sys_exit(perf_syscall_exit); if (ret) { pr_info("event trace: Could not activate" "syscall exit trace point"); } else { - set_bit(num, enabled_prof_exit_syscalls); - sys_prof_refcount_exit++; + set_bit(num, enabled_perf_exit_syscalls); + sys_perf_refcount_exit++; } mutex_unlock(&syscall_trace_lock); return ret; } -void prof_sysexit_disable(struct ftrace_event_call *call) +void perf_sysexit_disable(struct ftrace_event_call *call) { int num; num = ((struct syscall_metadata *)call->data)->syscall_nr; mutex_lock(&syscall_trace_lock); - sys_prof_refcount_exit--; - clear_bit(num, enabled_prof_exit_syscalls); - if (!sys_prof_refcount_exit) - unregister_trace_sys_exit(prof_syscall_exit); + sys_perf_refcount_exit--; + clear_bit(num, enabled_perf_exit_syscalls); + if (!sys_perf_refcount_exit) + unregister_trace_sys_exit(perf_syscall_exit); mutex_unlock(&syscall_trace_lock); } -- cgit v1.2.3 From 2886539d5e649c22a6d2107eb431d3bee81e0e6d Mon Sep 17 00:00:00 2001 From: Rafi Rubin Date: Wed, 10 Mar 2010 16:10:28 +0100 Subject: HID: ntrig: fix touch events This reinstates the lost unpressing of BTN_TOUCH. To prevent undesirable touch toggles, this also deals with tip switch events. Added a trap to prevent going out of bounds for hidinputs with empty reports. Clear bits of unused buttons which result in misidentification.
Signed-off-by: Rafi Rubin Signed-off-by: Jiri Kosina --- drivers/hid/hid-ntrig.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c index 3234c729a895..edcc0c4247bb 100644 --- a/drivers/hid/hid-ntrig.c +++ b/drivers/hid/hid-ntrig.c @@ -140,6 +140,9 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, nd->reading_mt = 1; nd->first_contact_confidence = 0; break; + case HID_DG_TIPSWITCH: + /* Prevent emission of touch until validated */ + return 1; case HID_DG_CONFIDENCE: nd->confidence = value; break; @@ -259,6 +262,7 @@ static int ntrig_event (struct hid_device *hid, struct hid_field *field, BTN_TOOL_TRIPLETAP, 0); input_report_key(input, BTN_TOOL_QUADTAP, 0); + input_report_key(input, BTN_TOUCH, 0); } break; @@ -308,13 +312,20 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id) list_for_each_entry(hidinput, &hdev->inputs, list) { + if (hidinput->report->maxfield < 1) + continue; + input = hidinput->input; switch (hidinput->report->field[0]->application) { case HID_DG_PEN: input->name = "N-Trig Pen"; break; case HID_DG_TOUCHSCREEN: + /* These keys are redundant for fingers, clear them + * to prevent incorrect identification */ __clear_bit(BTN_TOOL_PEN, input->keybit); + __clear_bit(BTN_TOOL_FINGER, input->keybit); + __clear_bit(BTN_0, input->keybit); /* * A little something special to enable * two and three finger taps. -- cgit v1.2.3 From b4d2314bb88b07e5a04e6c75b442a1dfcd60e340 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 10 Mar 2010 15:21:44 -0500 Subject: NFSv4: Don't ignore the NFS_INO_REVAL_FORCED flag in nfs_revalidate_inode() If the NFS_INO_REVAL_FORCED flag is set, that means that we don't yet have an up to date attribute cache. Even if we hold a delegation, we must put a GETATTR on the wire. 
Signed-off-by: Trond Myklebust Cc: stable@kernel.org --- fs/nfs/delegation.h | 6 ++++++ fs/nfs/dir.c | 2 +- fs/nfs/inode.c | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 944b627ec6e1..69e7b8140122 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -71,4 +71,10 @@ static inline int nfs_inode_return_delegation(struct inode *inode) } #endif +static inline int nfs_have_delegated_attributes(struct inode *inode) +{ + return nfs_have_delegation(inode, FMODE_READ) && + !(NFS_I(inode)->cache_validity & NFS_INO_REVAL_FORCED); +} + #endif diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index a1f6b4438fb1..c6f2750648f4 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1789,7 +1789,7 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str cache = nfs_access_search_rbtree(inode, cred); if (cache == NULL) goto out; - if (!nfs_have_delegation(inode, FMODE_READ) && + if (!nfs_have_delegated_attributes(inode) && !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo)) goto out_stale; res->jiffies = cache->jiffies; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 657201acda84..e358df75a6ad 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -729,7 +729,7 @@ int nfs_attribute_timeout(struct inode *inode) { struct nfs_inode *nfsi = NFS_I(inode); - if (nfs_have_delegation(inode, FMODE_READ)) + if (nfs_have_delegated_attributes(inode)) return 0; return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo); } -- cgit v1.2.3 From 07081fd8587478849b69d7b41596e81ff5a7f532 Mon Sep 17 00:00:00 2001 From: David Miller Date: Wed, 10 Mar 2010 14:05:35 -0700 Subject: uartlite: Fix build on sparc. We can get this driver enabled via MFD_TIMBERDALE which only requires GPIO to be on. But the of_address_to_resource() function is only present on powerpc and microblaze, so we have to conditionalize the CONFIG_OF probing bits on that. Signed-off-by: David S. 
Miller Signed-off-by: Grant Likely --- drivers/serial/uartlite.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c index ab2ab3c81834..f0a6c61b17f7 100644 --- a/drivers/serial/uartlite.c +++ b/drivers/serial/uartlite.c @@ -19,7 +19,7 @@ #include #include #include -#if defined(CONFIG_OF) +#if defined(CONFIG_OF) && (defined(CONFIG_PPC32) || defined(CONFIG_MICROBLAZE)) #include #include #include @@ -581,7 +581,7 @@ static struct platform_driver ulite_platform_driver = { /* --------------------------------------------------------------------- * OF bus bindings */ -#if defined(CONFIG_OF) +#if defined(CONFIG_OF) && (defined(CONFIG_PPC32) || defined(CONFIG_MICROBLAZE)) static int __devinit ulite_of_probe(struct of_device *op, const struct of_device_id *match) { @@ -631,11 +631,11 @@ static inline void __exit ulite_of_unregister(void) { of_unregister_platform_driver(&ulite_of_driver); } -#else /* CONFIG_OF */ -/* CONFIG_OF not enabled; do nothing helpers */ +#else /* CONFIG_OF && (CONFIG_PPC32 || CONFIG_MICROBLAZE) */ +/* Appropriate config not enabled; do nothing helpers */ static inline int __init ulite_of_register(void) { return 0; } static inline void __exit ulite_of_unregister(void) { } -#endif /* CONFIG_OF */ +#endif /* CONFIG_OF && (CONFIG_PPC32 || CONFIG_MICROBLAZE) */ /* --------------------------------------------------------------------- * Module setup/teardown -- cgit v1.2.3 From 5e7749436d576a525d7b2a4bcffb17b3364b9e00 Mon Sep 17 00:00:00 2001 From: Scott Ellis Date: Wed, 10 Mar 2010 14:22:45 -0700 Subject: spi/omap2_mcspi: fix NULL pointer dereference Check spi->controller_state before dereferencing. Shows up NULL here when using spi_alloc_device()/spi_add_device() and spi_add_device() fails before spi_setup(). Calling spi_dev_put() on the leftover spi_device results in the error. Signed-off-by: Scott Ellis Signed-off-by: Grant Likely --- drivers/spi/omap2_mcspi.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c index 715c518b1b68..87d44eeaaa7b 100644 --- a/drivers/spi/omap2_mcspi.c +++ b/drivers/spi/omap2_mcspi.c @@ -751,11 +751,13 @@ static void omap2_mcspi_cleanup(struct spi_device *spi) mcspi = spi_master_get_devdata(spi->master); mcspi_dma = &mcspi->dma_channels[spi->chip_select]; - /* Unlink controller state from context save list */ - cs = spi->controller_state; - list_del(&cs->node); + if (spi->controller_state) { + /* Unlink controller state from context save list */ + cs = spi->controller_state; + list_del(&cs->node); - kfree(spi->controller_state); + kfree(spi->controller_state); + } if (mcspi_dma->dma_rx_channel != -1) { omap_free_dma(mcspi_dma->dma_rx_channel); -- cgit v1.2.3 From 9bd4517ddc51c803784778ab52e6f0bc03b77a52 Mon Sep 17 00:00:00 2001 From: Scott Ellis Date: Wed, 10 Mar 2010 14:23:13 -0700 Subject: spi/omap2_mcspi: Use transaction speed if provided omap2_mcspi_transfer() gets called in omap2_mcspi_work() when the transaction speed_hz or bits_per_word fields are non-zero. omap2_mcspi_transfer() does not look at the speed_hz field so the override speed value is ignored. The code should probably change to one of these options. 1. Skip the call to omap2_mcspi_transfer() if the only reason was a non-zero speed_hz and it's not going to be used. 2. Use the new speed_hz value provided. The patch below uses the speed_hz value.
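For illustration, a client driver requesting a per-transfer clock override might look roughly like the sketch below; the function, device, and buffer names are hypothetical and not part of this patch, only the spi_message calls are standard SPI API.

	#include <linux/spi/spi.h>

	/* Hypothetical caller: ask for 1 MHz on this transfer only.
	 * Before this fix the controller ignored t.speed_hz and always
	 * used spi->max_speed_hz. */
	static int example_read(struct spi_device *spi, u8 *buf, size_t len)
	{
		struct spi_transfer t = {
			.rx_buf   = buf,
			.len      = len,
			.speed_hz = 1000000,
		};
		struct spi_message m;

		spi_message_init(&m);
		spi_message_add_tail(&t, &m);
		return spi_sync(spi, &m);
	}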
Signed-off-by: Scott Ellis Signed-off-by: Grant Likely --- drivers/spi/omap2_mcspi.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c index 87d44eeaaa7b..4dd786b99b8b 100644 --- a/drivers/spi/omap2_mcspi.c +++ b/drivers/spi/omap2_mcspi.c @@ -578,6 +578,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, struct spi_master *spi_cntrl; u32 l = 0, div = 0; u8 word_len = spi->bits_per_word; + u32 speed_hz = spi->max_speed_hz; mcspi = spi_master_get_devdata(spi->master); spi_cntrl = mcspi->master; @@ -587,9 +588,12 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, cs->word_len = word_len; - if (spi->max_speed_hz) { + if (t && t->speed_hz) + speed_hz = t->speed_hz; + + if (speed_hz) { while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div)) - > spi->max_speed_hz) + > speed_hz) div++; } else div = 15; -- cgit v1.2.3 From acc6a0935e5958ad1a1b99a1c6f44a52264bedf0 Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Wed, 10 Mar 2010 14:39:50 -0700 Subject: powerpc/52xx: update defconfigs Signed-off-by: Grant Likely --- arch/powerpc/configs/52xx/cm5200_defconfig | 24 ++--- arch/powerpc/configs/52xx/lite5200b_defconfig | 28 +++--- arch/powerpc/configs/52xx/motionpro_defconfig | 25 +++-- arch/powerpc/configs/52xx/pcm030_defconfig | 27 +++--- arch/powerpc/configs/52xx/tqm5200_defconfig | 28 +++--- arch/powerpc/configs/mpc5200_defconfig | 134 +++++++++++++++++--------- 6 files changed, 160 insertions(+), 106 deletions(-) diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig index ff9bdb28197d..218d49b36a0c 100644 --- a/arch/powerpc/configs/52xx/cm5200_defconfig +++ b/arch/powerpc/configs/52xx/cm5200_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.33-rc2 -# Wed Dec 30 14:45:07 2009 +# Linux kernel version: 2.6.34-rc1 +# Wed Mar 10 14:34:22 2010 # # CONFIG_PPC64 is not set @@ -94,11 +94,6 @@ CONFIG_RCU_FANOUT=32 # CONFIG_TREE_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=14 -CONFIG_GROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -# CONFIG_RT_GROUP_SCHED is not set -CONFIG_USER_SCHED=y -# CONFIG_CGROUP_SCHED is not set # CONFIG_CGROUPS is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y @@ -109,6 +104,7 @@ CONFIG_INITRAMFS_SOURCE="" CONFIG_RD_GZIP=y # CONFIG_RD_BZIP2 is not set # CONFIG_RD_LZMA is not set +# CONFIG_RD_LZO is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_ANON_INODES=y @@ -340,7 +336,6 @@ CONFIG_NET=y # Networking options # CONFIG_PACKET=y -# CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y CONFIG_XFRM=y CONFIG_XFRM_USER=y @@ -517,6 +512,8 @@ CONFIG_MTD_PHYSMAP_OF=y # UBI - Unsorted block images # # CONFIG_MTD_UBI is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_DYNAMIC=y CONFIG_OF_DEVICE=y CONFIG_OF_I2C=y CONFIG_OF_MDIO=y @@ -684,6 +681,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_SERIAL_MPC52xx=y CONFIG_SERIAL_MPC52xx_CONSOLE=y CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=57600 +# CONFIG_SERIAL_TIMBERDALE is not set # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set @@ -714,6 +712,7 @@ CONFIG_I2C_HELPER_AUTO=y CONFIG_I2C_MPC=y # CONFIG_I2C_OCORES is not set # CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set # # External I2C/SMBus adapter drivers @@ -754,6 +753,7 @@ CONFIG_WATCHDOG=y # Watchdog Device Drivers # # CONFIG_SOFT_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is 
not set # CONFIG_MPC5200_WDT is not set # @@ -771,18 +771,20 @@ CONFIG_SSB_POSSIBLE=y # Multifunction device drivers # # CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set # CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set # CONFIG_PMIC_DA903X is not set # CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set # CONFIG_MFD_WM8400 is not set # CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set # CONFIG_MFD_PCF50633 is not set # CONFIG_AB3100_CORE is not set -# CONFIG_MFD_88PM8607 is not set # CONFIG_REGULATOR is not set # CONFIG_MEDIA_SUPPORT is not set @@ -813,7 +815,6 @@ CONFIG_USB=y CONFIG_USB_DEVICEFS=y # CONFIG_USB_DEVICE_CLASS is not set # CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_SUSPEND is not set # CONFIG_USB_OTG is not set # CONFIG_USB_OTG_WHITELIST is not set # CONFIG_USB_OTG_BLACKLIST_HUB is not set @@ -891,7 +892,6 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_RIO500 is not set # CONFIG_USB_LEGOTOWER is not set # CONFIG_USB_LCD is not set -# CONFIG_USB_BERRY_CHARGE is not set # CONFIG_USB_LED is not set # CONFIG_USB_CYPRESS_CY7C63 is not set # CONFIG_USB_CYTHERM is not set @@ -903,7 +903,6 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_IOWARRIOR is not set # CONFIG_USB_TEST is not set # CONFIG_USB_ISIGHTFW is not set -# CONFIG_USB_VST is not set # CONFIG_USB_GADGET is not set # @@ -1009,6 +1008,7 @@ CONFIG_JFFS2_ZLIB=y # CONFIG_JFFS2_LZO is not set CONFIG_JFFS2_RTIME=y # CONFIG_JFFS2_RUBIN is not set +# CONFIG_LOGFS is not set CONFIG_CRAMFS=y # CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig index 7b3f4d0ed404..90492ff25232 100644 --- a/arch/powerpc/configs/52xx/lite5200b_defconfig +++ b/arch/powerpc/configs/52xx/lite5200b_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.33-rc2 -# Wed Dec 30 14:45:09 2009 +# Linux kernel version: 2.6.34-rc1 +# Wed Mar 10 14:34:24 2010 # # CONFIG_PPC64 is not set @@ -95,11 +95,6 @@ CONFIG_RCU_FANOUT=32 # CONFIG_TREE_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=14 -CONFIG_GROUP_SCHED=y -# CONFIG_FAIR_GROUP_SCHED is not set -# CONFIG_RT_GROUP_SCHED is not set -CONFIG_USER_SCHED=y -# CONFIG_CGROUP_SCHED is not set # CONFIG_CGROUPS is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y @@ -110,6 +105,7 @@ CONFIG_INITRAMFS_SOURCE="" CONFIG_RD_GZIP=y # CONFIG_RD_BZIP2 is not set # CONFIG_RD_LZMA is not set +# CONFIG_RD_LZO is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_ANON_INODES=y @@ -317,6 +313,7 @@ CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y # CONFIG_HIBERNATION is not set # CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y CONFIG_SECCOMP=y CONFIG_ISA_DMA_API=y @@ -333,7 +330,6 @@ CONFIG_PCI_SYSCALL=y # CONFIG_PCIEPORTBUS is not set CONFIG_ARCH_SUPPORTS_MSI=y # CONFIG_PCI_MSI is not set -CONFIG_PCI_LEGACY=y # CONFIG_PCI_DEBUG is not set # CONFIG_PCI_STUB is not set # CONFIG_PCI_IOV is not set @@ -360,7 +356,6 @@ CONFIG_NET=y # Networking options # CONFIG_PACKET=y -# CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y CONFIG_XFRM=y CONFIG_XFRM_USER=m @@ -457,6 +452,8 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_SYS_HYPERVISOR is not set # CONFIG_CONNECTOR is not set # CONFIG_MTD is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_DYNAMIC=y CONFIG_OF_DEVICE=y CONFIG_OF_I2C=y CONFIG_OF_MDIO=y @@ -631,6 +628,7 @@ 
CONFIG_ATA_SFF=y # CONFIG_PATA_IT821X is not set # CONFIG_PATA_IT8213 is not set # CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_LEGACY is not set # CONFIG_PATA_TRIFLEX is not set # CONFIG_PATA_MARVELL is not set CONFIG_PATA_MPC52xx=y @@ -668,7 +666,7 @@ CONFIG_PATA_MPC52xx=y # # -# See the help texts for more information. +# The newer stack is recommended. # # CONFIG_FIREWIRE is not set # CONFIG_IEEE1394 is not set @@ -768,6 +766,7 @@ CONFIG_CHELSIO_T3_DEPENDS=y # CONFIG_MLX4_CORE is not set # CONFIG_TEHUTI is not set # CONFIG_BNX2X is not set +# CONFIG_QLCNIC is not set # CONFIG_QLGE is not set # CONFIG_SFC is not set # CONFIG_BE2NET is not set @@ -828,6 +827,7 @@ CONFIG_SERIAL_MPC52xx=y CONFIG_SERIAL_MPC52xx_CONSOLE=y CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 # CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_TIMBERDALE is not set # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set @@ -879,6 +879,7 @@ CONFIG_I2C_HELPER_AUTO=y CONFIG_I2C_MPC=y # CONFIG_I2C_OCORES is not set # CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set # # External I2C/SMBus adapter drivers @@ -924,18 +925,21 @@ CONFIG_SSB_POSSIBLE=y # Multifunction device drivers # # CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set # CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set # CONFIG_PMIC_DA903X is not set # CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set # CONFIG_MFD_WM8400 is not set # CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set # CONFIG_MFD_PCF50633 is not set # CONFIG_AB3100_CORE is not set -# CONFIG_MFD_88PM8607 is not set +# CONFIG_LPC_SCH is not set # CONFIG_REGULATOR is not set # CONFIG_MEDIA_SUPPORT is not set @@ -944,6 +948,7 @@ CONFIG_SSB_POSSIBLE=y # # CONFIG_AGP is not set CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 # CONFIG_DRM is not set # CONFIG_VGASTATE is not set CONFIG_VIDEO_OUTPUT_CONTROL=m @@ -1062,6 +1067,7 @@ CONFIG_MISC_FILESYSTEMS=y # CONFIG_BEFS_FS is not set # CONFIG_BFS_FS is not set # CONFIG_EFS_FS is not set +# CONFIG_LOGFS is not set # CONFIG_CRAMFS is not set # CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig index eaae2d469aa0..dffc8cac825f 100644 --- a/arch/powerpc/configs/52xx/motionpro_defconfig +++ b/arch/powerpc/configs/52xx/motionpro_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.33-rc2 -# Wed Dec 30 14:45:08 2009 +# Linux kernel version: 2.6.34-rc1 +# Wed Mar 10 14:34:23 2010 # # CONFIG_PPC64 is not set @@ -94,11 +94,6 @@ CONFIG_RCU_FANOUT=32 # CONFIG_TREE_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=14 -CONFIG_GROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -# CONFIG_RT_GROUP_SCHED is not set -CONFIG_USER_SCHED=y -# CONFIG_CGROUP_SCHED is not set # CONFIG_CGROUPS is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y @@ -109,6 +104,7 @@ CONFIG_INITRAMFS_SOURCE="" CONFIG_RD_GZIP=y # CONFIG_RD_BZIP2 is not set # CONFIG_RD_LZMA is not set +# CONFIG_RD_LZO is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_ANON_INODES=y @@ -341,7 +337,6 @@ CONFIG_NET=y # Networking options # CONFIG_PACKET=y -# CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y CONFIG_XFRM=y CONFIG_XFRM_USER=y @@ -518,6 +513,8 @@ CONFIG_MTD_ROM=y # UBI - Unsorted block images # # 
CONFIG_MTD_UBI is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_DYNAMIC=y CONFIG_OF_DEVICE=y CONFIG_OF_I2C=y CONFIG_OF_MDIO=y @@ -699,6 +696,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_SERIAL_MPC52xx=y CONFIG_SERIAL_MPC52xx_CONSOLE=y CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 +# CONFIG_SERIAL_TIMBERDALE is not set # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set @@ -728,6 +726,7 @@ CONFIG_I2C_HELPER_AUTO=y CONFIG_I2C_MPC=y # CONFIG_I2C_OCORES is not set # CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set # # External I2C/SMBus adapter drivers @@ -773,10 +772,11 @@ CONFIG_HWMON=y # CONFIG_SENSORS_ADM1029 is not set # CONFIG_SENSORS_ADM1031 is not set # CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7411 is not set # CONFIG_SENSORS_ADT7462 is not set # CONFIG_SENSORS_ADT7470 is not set -# CONFIG_SENSORS_ADT7473 is not set # CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_ASC7621 is not set # CONFIG_SENSORS_ATXP1 is not set # CONFIG_SENSORS_DS1621 is not set # CONFIG_SENSORS_F71805F is not set @@ -811,6 +811,7 @@ CONFIG_HWMON=y # CONFIG_SENSORS_SMSC47M192 is not set # CONFIG_SENSORS_SMSC47B397 is not set # CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_AMC6821 is not set # CONFIG_SENSORS_THMC50 is not set # CONFIG_SENSORS_TMP401 is not set # CONFIG_SENSORS_TMP421 is not set @@ -831,6 +832,7 @@ CONFIG_WATCHDOG=y # Watchdog Device Drivers # # CONFIG_SOFT_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set # CONFIG_MPC5200_WDT is not set CONFIG_SSB_POSSIBLE=y @@ -843,18 +845,20 @@ CONFIG_SSB_POSSIBLE=y # Multifunction device drivers # # CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set # CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set # CONFIG_PMIC_DA903X is not set # CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set # CONFIG_MFD_WM8400 is not set # CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set # CONFIG_MFD_PCF50633 is not set # CONFIG_AB3100_CORE is not set -# CONFIG_MFD_88PM8607 is not set # CONFIG_REGULATOR is not set # CONFIG_MEDIA_SUPPORT is not set @@ -1050,6 +1054,7 @@ CONFIG_JFFS2_ZLIB=y # CONFIG_JFFS2_LZO is not set CONFIG_JFFS2_RTIME=y # CONFIG_JFFS2_RUBIN is not set +# CONFIG_LOGFS is not set CONFIG_CRAMFS=y # CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig index 1742c0200b75..3cb2a522046a 100644 --- a/arch/powerpc/configs/52xx/pcm030_defconfig +++ b/arch/powerpc/configs/52xx/pcm030_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.33-rc2 -# Wed Dec 30 14:45:10 2009 +# Linux kernel version: 2.6.34-rc1 +# Wed Mar 10 14:34:25 2010 # # CONFIG_PPC64 is not set @@ -97,11 +97,6 @@ CONFIG_RCU_FANOUT=32 CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 -CONFIG_GROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -# CONFIG_RT_GROUP_SCHED is not set -CONFIG_USER_SCHED=y -# CONFIG_CGROUP_SCHED is not set # CONFIG_CGROUPS is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y @@ -326,7 +321,6 @@ CONFIG_PCI_SYSCALL=y # CONFIG_PCIEPORTBUS is not set CONFIG_ARCH_SUPPORTS_MSI=y # CONFIG_PCI_MSI is not set -CONFIG_PCI_LEGACY=y # CONFIG_PCI_STUB is not set # CONFIG_PCI_IOV is not set # CONFIG_PCCARD is not set @@ -352,7 +346,6 @@ CONFIG_NET=y # Networking options # CONFIG_PACKET=y -# 
CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y # CONFIG_NET_KEY is not set CONFIG_INET=y @@ -525,6 +518,8 @@ CONFIG_MTD_PHYSMAP=y # UBI - Unsorted block images # # CONFIG_MTD_UBI is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_DYNAMIC=y CONFIG_OF_DEVICE=y CONFIG_OF_I2C=y CONFIG_OF_MDIO=y @@ -610,6 +605,7 @@ CONFIG_ATA_SFF=y # CONFIG_PATA_IT821X is not set # CONFIG_PATA_IT8213 is not set # CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_LEGACY is not set # CONFIG_PATA_TRIFLEX is not set # CONFIG_PATA_MARVELL is not set CONFIG_PATA_MPC52xx=m @@ -647,7 +643,7 @@ CONFIG_PATA_MPC52xx=m # # -# See the help texts for more information. +# The newer stack is recommended. # # CONFIG_FIREWIRE is not set # CONFIG_IEEE1394 is not set @@ -775,6 +771,7 @@ CONFIG_SERIAL_MPC52xx=y CONFIG_SERIAL_MPC52xx_CONSOLE=y CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=9600 # CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_TIMBERDALE is not set # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set @@ -824,6 +821,7 @@ CONFIG_I2C_HELPER_AUTO=y CONFIG_I2C_MPC=y # CONFIG_I2C_OCORES is not set # CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set # # External I2C/SMBus adapter drivers @@ -870,18 +868,21 @@ CONFIG_SSB_POSSIBLE=y # Multifunction device drivers # # CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set # CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set # CONFIG_PMIC_DA903X is not set # CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set # CONFIG_MFD_WM8400 is not set # CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set # CONFIG_MFD_PCF50633 is not set # CONFIG_AB3100_CORE is not set -# CONFIG_MFD_88PM8607 is not set +# CONFIG_LPC_SCH is not set # CONFIG_REGULATOR is not set # CONFIG_MEDIA_SUPPORT is not set @@ -890,6 +891,7 @@ CONFIG_SSB_POSSIBLE=y # # CONFIG_AGP is not set CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 # CONFIG_DRM is not set # CONFIG_VGASTATE is not set # CONFIG_VIDEO_OUTPUT_CONTROL is not set @@ -997,7 +999,6 @@ CONFIG_USB_STORAGE=m # CONFIG_USB_RIO500 is not set # CONFIG_USB_LEGOTOWER is not set # CONFIG_USB_LCD is not set -# CONFIG_USB_BERRY_CHARGE is not set # CONFIG_USB_LED is not set # CONFIG_USB_CYPRESS_CY7C63 is not set # CONFIG_USB_CYTHERM is not set @@ -1009,7 +1010,6 @@ CONFIG_USB_STORAGE=m # CONFIG_USB_IOWARRIOR is not set # CONFIG_USB_TEST is not set # CONFIG_USB_ISIGHTFW is not set -# CONFIG_USB_VST is not set # CONFIG_USB_GADGET is not set # @@ -1172,6 +1172,7 @@ CONFIG_JFFS2_ZLIB=y # CONFIG_JFFS2_LZO is not set CONFIG_JFFS2_RTIME=y # CONFIG_JFFS2_RUBIN is not set +# CONFIG_LOGFS is not set # CONFIG_CRAMFS is not set # CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig index 3972438db719..96181c62abfa 100644 --- a/arch/powerpc/configs/52xx/tqm5200_defconfig +++ b/arch/powerpc/configs/52xx/tqm5200_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.33-rc2 -# Wed Dec 30 14:45:09 2009 +# Linux kernel version: 2.6.34-rc1 +# Wed Mar 10 14:34:24 2010 # # CONFIG_PPC64 is not set @@ -94,11 +94,6 @@ CONFIG_RCU_FANOUT=32 # CONFIG_TREE_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=14 -CONFIG_GROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -# CONFIG_RT_GROUP_SCHED is not set -CONFIG_USER_SCHED=y -# CONFIG_CGROUP_SCHED is not set # 
CONFIG_CGROUPS is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y @@ -109,6 +104,7 @@ CONFIG_INITRAMFS_SOURCE="" CONFIG_RD_GZIP=y # CONFIG_RD_BZIP2 is not set # CONFIG_RD_LZMA is not set +# CONFIG_RD_LZO is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_ANON_INODES=y @@ -346,7 +342,6 @@ CONFIG_NET=y # Networking options # CONFIG_PACKET=y -# CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y CONFIG_XFRM=y CONFIG_XFRM_USER=y @@ -524,6 +519,8 @@ CONFIG_MTD_PHYSMAP_OF=y # UBI - Unsorted block images # # CONFIG_MTD_UBI is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_DYNAMIC=y CONFIG_OF_DEVICE=y CONFIG_OF_I2C=y CONFIG_OF_MDIO=y @@ -704,6 +701,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_SERIAL_MPC52xx=y CONFIG_SERIAL_MPC52xx_CONSOLE=y CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 +# CONFIG_SERIAL_TIMBERDALE is not set # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set @@ -733,6 +731,7 @@ CONFIG_I2C_HELPER_AUTO=y CONFIG_I2C_MPC=y # CONFIG_I2C_OCORES is not set # CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set # # External I2C/SMBus adapter drivers @@ -780,10 +779,11 @@ CONFIG_HWMON=y # CONFIG_SENSORS_ADM1029 is not set # CONFIG_SENSORS_ADM1031 is not set # CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7411 is not set # CONFIG_SENSORS_ADT7462 is not set # CONFIG_SENSORS_ADT7470 is not set -# CONFIG_SENSORS_ADT7473 is not set # CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_ASC7621 is not set # CONFIG_SENSORS_ATXP1 is not set # CONFIG_SENSORS_DS1621 is not set # CONFIG_SENSORS_F71805F is not set @@ -818,6 +818,7 @@ CONFIG_HWMON=y # CONFIG_SENSORS_SMSC47M192 is not set # CONFIG_SENSORS_SMSC47B397 is not set # CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_AMC6821 is not set # CONFIG_SENSORS_THMC50 is not set # CONFIG_SENSORS_TMP401 is not set # CONFIG_SENSORS_TMP421 is not set @@ -838,6 +839,7 @@ CONFIG_WATCHDOG=y # Watchdog Device Drivers # # CONFIG_SOFT_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set # CONFIG_MPC5200_WDT is not set # @@ -855,18 +857,20 @@ CONFIG_SSB_POSSIBLE=y # Multifunction device drivers # # CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set # CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set # CONFIG_PMIC_DA903X is not set # CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set # CONFIG_MFD_WM8400 is not set # CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set # CONFIG_MFD_PCF50633 is not set # CONFIG_AB3100_CORE is not set -# CONFIG_MFD_88PM8607 is not set # CONFIG_REGULATOR is not set # CONFIG_MEDIA_SUPPORT is not set @@ -897,7 +901,6 @@ CONFIG_USB=y CONFIG_USB_DEVICEFS=y # CONFIG_USB_DEVICE_CLASS is not set # CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_SUSPEND is not set # CONFIG_USB_OTG is not set # CONFIG_USB_OTG_WHITELIST is not set # CONFIG_USB_OTG_BLACKLIST_HUB is not set @@ -975,7 +978,6 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_RIO500 is not set # CONFIG_USB_LEGOTOWER is not set # CONFIG_USB_LCD is not set -# CONFIG_USB_BERRY_CHARGE is not set # CONFIG_USB_LED is not set # CONFIG_USB_CYPRESS_CY7C63 is not set # CONFIG_USB_CYTHERM is not set @@ -987,7 +989,6 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_IOWARRIOR is not set # CONFIG_USB_TEST is not set # CONFIG_USB_ISIGHTFW is not set -# CONFIG_USB_VST is not set # CONFIG_USB_GADGET is not set # @@ -1151,6 +1152,7 @@ CONFIG_JFFS2_ZLIB=y # CONFIG_JFFS2_LZO is not set 
CONFIG_JFFS2_RTIME=y # CONFIG_JFFS2_RUBIN is not set +# CONFIG_LOGFS is not set CONFIG_CRAMFS=y # CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig index 61cf73d0000f..7012ac0134f0 100644 --- a/arch/powerpc/configs/mpc5200_defconfig +++ b/arch/powerpc/configs/mpc5200_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.33-rc2 -# Wed Dec 30 15:08:52 2009 +# Linux kernel version: 2.6.34-rc1 +# Wed Mar 10 14:38:54 2010 # # CONFIG_PPC64 is not set @@ -96,30 +96,37 @@ CONFIG_RCU_FANOUT=32 # CONFIG_TREE_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_GROUP_SCHED is not set # CONFIG_CGROUPS is not set CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y # CONFIG_RELAY is not set -# CONFIG_NAMESPACES is not set +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_IPC_NS is not set +# CONFIG_USER_NS is not set +# CONFIG_PID_NS is not set +# CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" CONFIG_RD_GZIP=y -# CONFIG_RD_BZIP2 is not set -# CONFIG_RD_LZMA is not set +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_LZO=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y CONFIG_ANON_INODES=y -CONFIG_EMBEDDED=y -# CONFIG_SYSCTL_SYSCALL is not set -# CONFIG_KALLSYMS is not set +# CONFIG_EMBEDDED is not set +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set CONFIG_HOTPLUG=y CONFIG_PRINTK=y CONFIG_BUG=y CONFIG_ELF_CORE=y CONFIG_BASE_FULL=y CONFIG_FUTEX=y -# CONFIG_EPOLL is not set +CONFIG_EPOLL=y CONFIG_SIGNALFD=y CONFIG_TIMERFD=y CONFIG_EVENTFD=y @@ -141,6 +148,7 @@ CONFIG_SLUB=y # CONFIG_SLOB is not set # CONFIG_PROFILING is not set CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y CONFIG_HAVE_IOREMAP_PROT=y CONFIG_HAVE_KPROBES=y @@ -320,6 +328,7 @@ CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y # CONFIG_HIBERNATION is not set # CONFIG_PM_RUNTIME is not set +CONFIG_PM_OPS=y CONFIG_SECCOMP=y CONFIG_ISA_DMA_API=y @@ -336,7 +345,6 @@ CONFIG_PCI_SYSCALL=y # CONFIG_PCIEPORTBUS is not set CONFIG_ARCH_SUPPORTS_MSI=y # CONFIG_PCI_MSI is not set -CONFIG_PCI_LEGACY=y # CONFIG_PCI_DEBUG is not set # CONFIG_PCI_STUB is not set # CONFIG_PCI_IOV is not set @@ -363,7 +371,6 @@ CONFIG_NET=y # Networking options # CONFIG_PACKET=y -# CONFIG_PACKET_MMAP is not set CONFIG_UNIX=y CONFIG_XFRM=y CONFIG_XFRM_USER=m @@ -454,7 +461,9 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_DEVTMPFS is not set CONFIG_STANDALONE=y CONFIG_PREVENT_FIRMWARE_BUILD=y -# CONFIG_FW_LOADER is not set +CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" # CONFIG_DEBUG_DRIVER is not set # CONFIG_DEBUG_DEVRES is not set # CONFIG_SYS_HYPERVISOR is not set @@ -554,6 +563,8 @@ CONFIG_MTD_UBI_BEB_RESERVE=1 # UBI debugging options # # CONFIG_MTD_UBI_DEBUG is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_DYNAMIC=y CONFIG_OF_DEVICE=y CONFIG_OF_GPIO=y CONFIG_OF_I2C=y @@ -732,6 +743,7 @@ CONFIG_ATA_SFF=y # CONFIG_PATA_IT821X is not set # CONFIG_PATA_IT8213 is not set # CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_LEGACY is not set # CONFIG_PATA_TRIFLEX is not set # CONFIG_PATA_MARVELL is not set CONFIG_PATA_MPC52xx=y @@ -770,7 +782,7 @@ CONFIG_PATA_PLATFORM=y # # -# See the help texts for more information. +# The newer stack is recommended. 
# # CONFIG_FIREWIRE is not set # CONFIG_IEEE1394 is not set @@ -929,6 +941,7 @@ CONFIG_SERIAL_MPC52xx=y CONFIG_SERIAL_MPC52xx_CONSOLE=y CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200 # CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_TIMBERDALE is not set # CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set CONFIG_UNIX98_PTYS=y # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set @@ -981,6 +994,7 @@ CONFIG_I2C_ALGOBIT=y CONFIG_I2C_MPC=y # CONFIG_I2C_OCORES is not set # CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set # # External I2C/SMBus adapter drivers @@ -1010,9 +1024,9 @@ CONFIG_SPI_MASTER=y # # SPI Master Controller Drivers # -# CONFIG_SPI_BITBANG is not set -# CONFIG_SPI_GPIO is not set -# CONFIG_SPI_MPC52xx is not set +CONFIG_SPI_BITBANG=m +CONFIG_SPI_GPIO=m +CONFIG_SPI_MPC52xx=m CONFIG_SPI_MPC52xx_PSC=m # CONFIG_SPI_XILINX is not set # CONFIG_SPI_DESIGNWARE is not set @@ -1036,14 +1050,18 @@ CONFIG_GPIOLIB=y # # Memory mapped GPIO expanders: # +# CONFIG_GPIO_IT8761E is not set # CONFIG_GPIO_XILINX is not set +# CONFIG_GPIO_SCH is not set # # I2C GPIO expanders: # +# CONFIG_GPIO_MAX7300 is not set # CONFIG_GPIO_MAX732X is not set # CONFIG_GPIO_PCA953X is not set # CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_ADP5588 is not set # # PCI GPIO expanders: @@ -1080,10 +1098,11 @@ CONFIG_HWMON=y # CONFIG_SENSORS_ADM1029 is not set # CONFIG_SENSORS_ADM1031 is not set # CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7411 is not set # CONFIG_SENSORS_ADT7462 is not set # CONFIG_SENSORS_ADT7470 is not set -# CONFIG_SENSORS_ADT7473 is not set # CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_ASC7621 is not set # CONFIG_SENSORS_ATXP1 is not set # CONFIG_SENSORS_DS1621 is not set # CONFIG_SENSORS_I5K_AMB is not set @@ -1123,6 +1142,7 @@ CONFIG_HWMON=y # CONFIG_SENSORS_SMSC47M192 is not set # CONFIG_SENSORS_SMSC47B397 is not set # CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_AMC6821 is not set # CONFIG_SENSORS_THMC50 is not set # CONFIG_SENSORS_TMP401 is not set # CONFIG_SENSORS_TMP421 is not set @@ -1147,6 +1167,7 @@ CONFIG_WATCHDOG=y # Watchdog Device Drivers # # CONFIG_SOFT_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set # CONFIG_ALIM7101_WDT is not set # CONFIG_MPC5200_WDT is not set # CONFIG_WATCHDOG_RTAS is not set @@ -1172,22 +1193,27 @@ CONFIG_SSB_POSSIBLE=y # Multifunction device drivers # # CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_SM501 is not set # CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set # CONFIG_TPS65010 is not set # CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set # CONFIG_PMIC_DA903X is not set # CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set # CONFIG_MFD_WM8400 is not set # CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set # CONFIG_MFD_PCF50633 is not set # CONFIG_MFD_MC13783 is not set # CONFIG_AB3100_CORE is not set # CONFIG_EZX_PCAP is not set -# CONFIG_MFD_88PM8607 is not set # CONFIG_AB4500_CORE is not set +# CONFIG_MFD_TIMBERDALE is not set +# CONFIG_LPC_SCH is not set # CONFIG_REGULATOR is not set # CONFIG_MEDIA_SUPPORT is not set @@ -1196,6 +1222,7 @@ CONFIG_SSB_POSSIBLE=y # # CONFIG_AGP is not set CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 CONFIG_DRM=y # CONFIG_DRM_TDFX is not set # CONFIG_DRM_R128 is not set @@ -1309,32 +1336,46 @@ CONFIG_USB_HID=y # # Special HID drivers # +# CONFIG_HID_3M_PCT is not set CONFIG_HID_A4TECH=y -# CONFIG_HID_APPLE is not set +CONFIG_HID_APPLE=y CONFIG_HID_BELKIN=y CONFIG_HID_CHERRY=y -# 
CONFIG_HID_CHICONY is not set +CONFIG_HID_CHICONY=y CONFIG_HID_CYPRESS=y -# CONFIG_HID_DRAGONRISE is not set +CONFIG_HID_DRAGONRISE=y +# CONFIG_DRAGONRISE_FF is not set CONFIG_HID_EZKEY=y -# CONFIG_HID_KYE is not set -# CONFIG_HID_GYRATION is not set -# CONFIG_HID_TWINHAN is not set -# CONFIG_HID_KENSINGTON is not set -# CONFIG_HID_LOGITECH is not set -# CONFIG_HID_MICROSOFT is not set -# CONFIG_HID_MONTEREY is not set +CONFIG_HID_KYE=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LOGITECH=y +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +CONFIG_HID_MICROSOFT=y +# CONFIG_HID_MOSART is not set +CONFIG_HID_MONTEREY=y # CONFIG_HID_NTRIG is not set -# CONFIG_HID_PANTHERLORD is not set -# CONFIG_HID_PETALYNX is not set -# CONFIG_HID_SAMSUNG is not set -# CONFIG_HID_SONY is not set -# CONFIG_HID_SUNPLUS is not set -# CONFIG_HID_GREENASIA is not set -# CONFIG_HID_SMARTJOYPLUS is not set -# CONFIG_HID_TOPSEED is not set -# CONFIG_HID_THRUSTMASTER is not set -# CONFIG_HID_ZEROPLUS is not set +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PETALYNX=y +# CONFIG_HID_QUANTA is not set +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +# CONFIG_HID_STANTUM is not set +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_SMARTJOYPLUS=y +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +# CONFIG_THRUSTMASTER_FF is not set +CONFIG_HID_ZEROPLUS=y +# CONFIG_ZEROPLUS_FF is not set CONFIG_USB_SUPPORT=y CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB_ARCH_HAS_OHCI=y @@ -1349,10 +1390,7 @@ CONFIG_USB=y CONFIG_USB_DEVICEFS=y # CONFIG_USB_DEVICE_CLASS is not set # CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_SUSPEND is not set # CONFIG_USB_OTG is not set -# CONFIG_USB_OTG_WHITELIST is not set -# CONFIG_USB_OTG_BLACKLIST_HUB is not set CONFIG_USB_MON=y # CONFIG_USB_WUSB is not set # CONFIG_USB_WUSB_CBAF is not set @@ -1433,7 +1471,6 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_RIO500 is not set # CONFIG_USB_LEGOTOWER is not set # CONFIG_USB_LCD is not set -# CONFIG_USB_BERRY_CHARGE is not set # CONFIG_USB_LED is not set # CONFIG_USB_CYPRESS_CY7C63 is not set # CONFIG_USB_CYTHERM is not set @@ -1445,7 +1482,6 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_IOWARRIOR is not set # CONFIG_USB_TEST is not set # CONFIG_USB_ISIGHTFW is not set -# CONFIG_USB_VST is not set # CONFIG_USB_GADGET is not set # @@ -1636,6 +1672,7 @@ CONFIG_UBIFS_FS=m CONFIG_UBIFS_FS_LZO=y CONFIG_UBIFS_FS_ZLIB=y # CONFIG_UBIFS_FS_DEBUG is not set +# CONFIG_LOGFS is not set CONFIG_CRAMFS=y # CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set @@ -1730,8 +1767,11 @@ CONFIG_CRC32=y CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=y CONFIG_LZO_COMPRESS=m -CONFIG_LZO_DECOMPRESS=m +CONFIG_LZO_DECOMPRESS=y CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_LZO=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y @@ -1776,11 +1816,11 @@ CONFIG_SCHED_DEBUG=y # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set # CONFIG_DEBUG_KOBJECT is not set -# CONFIG_DEBUG_BUGVERBOSE is not set +CONFIG_DEBUG_BUGVERBOSE=y CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_VM is not set # CONFIG_DEBUG_WRITECOUNT is not set -# CONFIG_DEBUG_MEMORY_INIT is not set +CONFIG_DEBUG_MEMORY_INIT=y # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set # CONFIG_DEBUG_NOTIFIERS is not set -- cgit v1.2.3 From 
220b140b52ab6cc133f674a7ffec8fa792054f25 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 10 Mar 2010 20:45:52 +1100 Subject: perf_event: Fix oops triggered by cpu offline/online Anton Blanchard found that he could reliably make the kernel hit a BUG_ON in the slab allocator by taking a cpu offline and then online while a system-wide perf record session was running. The reason is that when the cpu comes up, we completely reinitialize the ctx field of the struct perf_cpu_context for the cpu. If there is a system-wide perf record session running, then there will be a struct perf_event that has a reference to the context, so its refcount will be 2. (The perf_event has been removed from the context's group_entry and event_entry lists by perf_event_exit_cpu(), but that doesn't remove the perf_event's reference to the context and doesn't decrement the context's refcount.) When the cpu comes up, perf_event_init_cpu() gets called, and it calls __perf_event_init_context() on the cpu's context. That resets the refcount to 1. Then when the perf record session finishes and the perf_event is closed, the refcount gets decremented to 0 and the context gets kfreed after an RCU grace period. Since the context wasn't kmalloced -- it's part of a per-cpu variable -- bad things happen. In fact we don't need to completely reinitialize the context when the cpu comes up. It's sufficient to initialize the context once at boot, but we need to do it for all possible cpus. This moves the context initialization to happen at boot time. With this, we don't trash the refcount and the context never gets kfreed, and we don't hit the BUG_ON. Reported-by: Anton Blanchard Signed-off-by: Paul Mackerras Tested-by: Anton Blanchard Acked-by: Peter Zijlstra Cc: Signed-off-by: Ingo Molnar --- kernel/perf_event.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/kernel/perf_event.c b/kernel/perf_event.c index c502b18594cc..fb3031cf9f17 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -5368,12 +5368,22 @@ int perf_event_init_task(struct task_struct *child) return ret; } +static void __init perf_event_init_all_cpus(void) +{ + int cpu; + struct perf_cpu_context *cpuctx; + + for_each_possible_cpu(cpu) { + cpuctx = &per_cpu(perf_cpu_context, cpu); + __perf_event_init_context(&cpuctx->ctx, NULL); + } +} + static void __cpuinit perf_event_init_cpu(int cpu) { struct perf_cpu_context *cpuctx; cpuctx = &per_cpu(perf_cpu_context, cpu); - __perf_event_init_context(&cpuctx->ctx, NULL); spin_lock(&perf_resource_lock); cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; @@ -5439,6 +5449,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = { void __init perf_event_init(void) { + perf_event_init_all_cpus(); perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, (void *)(long)smp_processor_id()); perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, -- cgit v1.2.3 From a12b51c478899fe0b7e874a559b05ba35f1128ee Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 10 Mar 2010 20:36:09 +1100 Subject: perf tools: Fix sparse CPU numbering related bugs At present, the perf subcommands that do system-wide monitoring (perf stat, perf record and perf top) don't work properly unless the online cpus are numbered 0, 1, ..., N-1. These tools ask for the number of online cpus with sysconf(_SC_NPROCESSORS_ONLN) and then try to create events for cpus 0, 1, ..., N-1. This creates problems for systems where the online cpus are numbered sparsely. 
For example, a POWER6 system in single-threaded mode (i.e. only running 1 hardware thread per core) will have only even-numbered cpus online. This fixes the problem by reading the /sys/devices/system/cpu/online file to find out which cpus are online. The code that does that is in tools/perf/util/cpumap.[ch], and consists of a read_cpu_map() function that sets up a cpumap[] array and returns the number of online cpus. If /sys/devices/system/cpu/online can't be read or can't be parsed successfully, it falls back to using sysconf to ask how many cpus are online and sets up an identity map in cpumap[]. The perf record, perf stat and perf top code then calls read_cpu_map() in the system-wide monitoring case (instead of sysconf) and uses cpumap[] to get the cpu numbers to pass to perf_event_open. Signed-off-by: Paul Mackerras Cc: Anton Blanchard Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo LKML-Reference: <20100310093609.GA3959@brick.ozlabs.ibm.com> Signed-off-by: Ingo Molnar --- tools/perf/Makefile | 2 ++ tools/perf/builtin-record.c | 7 +++--- tools/perf/builtin-stat.c | 10 +++++--- tools/perf/builtin-top.c | 9 +++---- tools/perf/util/cpumap.c | 59 +++++++++++++++++++++++++++++++++++++++++++++ tools/perf/util/cpumap.h | 7 ++++++ 6 files changed, 81 insertions(+), 13 deletions(-) create mode 100644 tools/perf/util/cpumap.c create mode 100644 tools/perf/util/cpumap.h diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 2d537382c686..5840499e2d22 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -387,6 +387,7 @@ LIB_H += util/thread.h LIB_H += util/trace-event.h LIB_H += util/probe-finder.h LIB_H += util/probe-event.h +LIB_H += util/cpumap.h LIB_OBJS += util/abspath.o LIB_OBJS += util/alias.o @@ -433,6 +434,7 @@ LIB_OBJS += util/sort.o LIB_OBJS += util/hist.o LIB_OBJS += util/probe-event.o LIB_OBJS += util/util.o +LIB_OBJS += util/cpumap.o BUILTIN_OBJS += builtin-annotate.o diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index f573bbb83572..b09d3b27ca14 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -22,6 +22,7 @@ #include "util/debug.h" #include "util/session.h" #include "util/symbol.h" +#include "util/cpumap.h" #include #include @@ -421,9 +422,6 @@ static int __cmd_record(int argc, const char **argv) char buf; page_size = sysconf(_SC_PAGE_SIZE); - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); - assert(nr_cpus <= MAX_NR_CPUS); - assert(nr_cpus >= 0); atexit(sig_atexit); signal(SIGCHLD, sig_handler); @@ -547,8 +545,9 @@ static int __cmd_record(int argc, const char **argv) if ((!system_wide && !inherit) || profile_cpu != -1) { open_counters(profile_cpu, target_pid); } else { + nr_cpus = read_cpu_map(); for (i = 0; i < nr_cpus; i++) - open_counters(i, target_pid); + open_counters(cpumap[i], target_pid); } if (file_new) { diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index e8c85d5aec41..95db31cff6fd 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -45,6 +45,7 @@ #include "util/event.h" #include "util/debug.h" #include "util/header.h" +#include "util/cpumap.h" #include #include @@ -151,7 +152,7 @@ static void create_perf_stat_counter(int counter, int pid) unsigned int cpu; for (cpu = 0; cpu < nr_cpus; cpu++) { - fd[cpu][counter] = sys_perf_event_open(attr, -1, cpu, -1, 0); + fd[cpu][counter] = sys_perf_event_open(attr, -1, cpumap[cpu], -1, 0); if (fd[cpu][counter] < 0 && verbose) fprintf(stderr, ERR_PERF_OPEN, counter, fd[cpu][counter], strerror(errno)); @@ -519,9 +520,10 @@ int 
cmd_stat(int argc, const char **argv, const char *prefix __used) nr_counters = ARRAY_SIZE(default_attrs); } - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); - assert(nr_cpus <= MAX_NR_CPUS); - assert((int)nr_cpus >= 0); + if (system_wide) + nr_cpus = read_cpu_map(); + else + nr_cpus = 1; /* * We dont want to block the signals - that would cause diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 31f2e597800c..0b719e3dde05 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -28,6 +28,7 @@ #include #include "util/parse-options.h" #include "util/parse-events.h" +#include "util/cpumap.h" #include "util/debug.h" @@ -1123,7 +1124,7 @@ static void start_counter(int i, int counter) cpu = profile_cpu; if (target_pid == -1 && profile_cpu == -1) - cpu = i; + cpu = cpumap[i]; attr = attrs + counter; @@ -1347,12 +1348,10 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) attrs[counter].sample_period = default_interval; } - nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); - assert(nr_cpus <= MAX_NR_CPUS); - assert(nr_cpus >= 0); - if (target_pid != -1 || profile_cpu != -1) nr_cpus = 1; + else + nr_cpus = read_cpu_map(); get_term_dimensions(&winsize); if (print_entries == 0) { diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c new file mode 100644 index 000000000000..4e01490e51e5 --- /dev/null +++ b/tools/perf/util/cpumap.c @@ -0,0 +1,59 @@ +#include "util.h" +#include "../perf.h" +#include "cpumap.h" +#include +#include + +int cpumap[MAX_NR_CPUS]; + +static int default_cpu_map(void) +{ + int nr_cpus, i; + + nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + assert(nr_cpus <= MAX_NR_CPUS); + assert((int)nr_cpus >= 0); + + for (i = 0; i < nr_cpus; ++i) + cpumap[i] = i; + + return nr_cpus; +} + +int read_cpu_map(void) +{ + FILE *onlnf; + int nr_cpus = 0; + int n, cpu, prev; + char sep; + + onlnf = fopen("/sys/devices/system/cpu/online", "r"); + if (!onlnf) + return default_cpu_map(); + + sep = 0; + prev = -1; + for (;;) { + n = fscanf(onlnf, "%u%c", &cpu, &sep); + if (n <= 0) + break; + if (prev >= 0) { + assert(nr_cpus + cpu - prev - 1 < MAX_NR_CPUS); + while (++prev < cpu) + cpumap[nr_cpus++] = prev; + } + assert (nr_cpus < MAX_NR_CPUS); + cpumap[nr_cpus++] = cpu; + if (n == 2 && sep == '-') + prev = cpu; + else + prev = -1; + if (n == 1 || sep == '\n') + break; + } + fclose(onlnf); + if (nr_cpus > 0) + return nr_cpus; + + return default_cpu_map(); +} diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h new file mode 100644 index 000000000000..86c78bb33098 --- /dev/null +++ b/tools/perf/util/cpumap.h @@ -0,0 +1,7 @@ +#ifndef __PERF_CPUMAP_H +#define __PERF_CPUMAP_H + +extern int read_cpu_map(void); +extern int cpumap[]; + +#endif /* __PERF_CPUMAP_H */ -- cgit v1.2.3 From ccfe27d7000668b02d10fc3e06aa49e3e3603162 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 14 Jan 2010 11:21:02 +0100 Subject: microblaze: Support DMA Add DMA support for Microblaze. There are some part of this new feature: 1. Basic DMA support 2. Enable DMA debug option 3. Setup notifier Ad 1. dma-mapping come from powerpc and x86 version and it is based on generic dma-mapping-common.h Ad 2. DMA support debug features which is used in generic file. For more information please look at Documentation/DMA-API.txt Ad 3. notifier is very important to setup dma_ops. Without this part for example ll_temac driver failed because there are no setup dma operations. 
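As a rough illustration only (not part of this patch), once the notifier from point 3 has installed dma_direct_ops on a device, a driver can use the generic DMA API in the usual way. The names my_probe and MY_BUF_SIZE below are invented for the example:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    #define MY_BUF_SIZE 4096    /* illustrative buffer size */

    static int my_probe(struct device *dev)
    {
            dma_addr_t handle;
            void *buf;

            /* Only valid once the bus notifier has done
             * set_dma_ops(dev, &dma_direct_ops) for this device. */
            buf = dma_alloc_coherent(dev, MY_BUF_SIZE, &handle, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            /* ... program the device with "handle", use "buf" from the CPU ... */

            dma_free_coherent(dev, MY_BUF_SIZE, buf, handle);
            return 0;
    }
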
Signed-off-by: Michal Simek --- arch/microblaze/Kconfig | 5 +- arch/microblaze/include/asm/device.h | 4 + arch/microblaze/include/asm/dma-mapping.h | 155 +++++++++++++++++++++++++++++- arch/microblaze/include/asm/io.h | 1 + arch/microblaze/kernel/Makefile | 2 +- arch/microblaze/kernel/dma.c | 124 ++++++++++++++++++++++++ arch/microblaze/kernel/setup.c | 36 +++++++ 7 files changed, 322 insertions(+), 5 deletions(-) create mode 100644 arch/microblaze/kernel/dma.c diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index b008168ae946..71ec04137417 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -14,6 +14,8 @@ config MICROBLAZE select USB_ARCH_HAS_EHCI select ARCH_WANT_OPTIONAL_GPIOLIB select HAVE_OPROFILE + select HAVE_DMA_ATTRS + select HAVE_DMA_API_DEBUG select TRACING_SUPPORT config SWAP @@ -76,9 +78,6 @@ config HAVE_LATENCYTOP_SUPPORT config PCI def_bool n -config NO_DMA - def_bool y - config DTC def_bool y diff --git a/arch/microblaze/include/asm/device.h b/arch/microblaze/include/asm/device.h index 78a038452c0f..402b46e630f6 100644 --- a/arch/microblaze/include/asm/device.h +++ b/arch/microblaze/include/asm/device.h @@ -14,6 +14,10 @@ struct device_node; struct dev_archdata { /* Optional pointer to an OF device node */ struct device_node *of_node; + + /* DMA operations on that device */ + struct dma_map_ops *dma_ops; + void *dma_data; }; struct pdev_archdata { diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index d00e40099165..096fa96ee736 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -1 +1,154 @@ -#include +/* + * Implements the generic device dma API for microblaze and the pci + * + * Copyright (C) 2009-2010 Michal Simek + * Copyright (C) 2009-2010 PetaLogix + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file COPYING in the main directory of this + * archive for more details. + * + * This file is base on powerpc and x86 dma-mapping.h versions + * Copyright (C) 2004 IBM + */ + +#ifndef _ASM_MICROBLAZE_DMA_MAPPING_H +#define _ASM_MICROBLAZE_DMA_MAPPING_H + +/* + * See Documentation/PCI/PCI-DMA-mapping.txt and + * Documentation/DMA-API.txt for documentation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DMA_ERROR_CODE (~(dma_addr_t)0x0) + +#define __dma_alloc_coherent(dev, gfp, size, handle) NULL +#define __dma_free_coherent(size, addr) ((void)0) +#define __dma_sync(addr, size, rw) ((void)0) +#define __dma_sync_page(pg, off, sz, rw) ((void)0) + +static inline unsigned long device_to_mask(struct device *dev) +{ + if (dev->dma_mask && *dev->dma_mask) + return *dev->dma_mask; + /* Assume devices without mask can take 32 bit addresses */ + return 0xfffffffful; +} + +extern struct dma_map_ops *dma_ops; + +/* + * Available generic sets of operations + */ +extern struct dma_map_ops dma_direct_ops; + +static inline struct dma_map_ops *get_dma_ops(struct device *dev) +{ + /* We don't handle the NULL dev case for ISA for now. We could + * do it via an out of line call but it is not needed for now. The + * only ISA DMA device we support is the floppy and we have a hack + * in the floppy driver directly to get a device for us. 
+ */ + if (unlikely(!dev) || !dev->archdata.dma_ops) + return NULL; + + return dev->archdata.dma_ops; +} + +static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) +{ + dev->archdata.dma_ops = ops; +} + +static inline int dma_supported(struct device *dev, u64 mask) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + if (unlikely(!ops)) + return 0; + if (!ops->dma_supported) + return 1; + return ops->dma_supported(dev, mask); +} + +#ifdef CONFIG_PCI +/* We have our own implementation of pci_set_dma_mask() */ +#define HAVE_ARCH_PCI_SET_DMA_MASK + +#endif + +static inline int dma_set_mask(struct device *dev, u64 dma_mask) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + if (unlikely(ops == NULL)) + return -EIO; + if (ops->set_dma_mask) + return ops->set_dma_mask(dev, dma_mask); + if (!dev->dma_mask || !dma_supported(dev, dma_mask)) + return -EIO; + *dev->dma_mask = dma_mask; + return 0; +} + +#include + +static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + if (ops->mapping_error) + return ops->mapping_error(dev, dma_addr); + + return (dma_addr == DMA_ERROR_CODE); +} + +#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) +#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) +#define dma_is_consistent(d, h) (1) + +static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + void *memory; + + BUG_ON(!ops); + + memory = ops->alloc_coherent(dev, size, dma_handle, flag); + + debug_dma_alloc_coherent(dev, size, *dma_handle, memory); + return memory; +} + +static inline void dma_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!ops); + debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); + ops->free_coherent(dev, size, cpu_addr, dma_handle); +} + +static inline int dma_get_cache_alignment(void) +{ + return L1_CACHE_BYTES; +} + +static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, + enum dma_data_direction direction) +{ + BUG_ON(direction == DMA_NONE); + __dma_sync(vaddr, size, (int)direction); +} + +#endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */ diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h index 267c7c779e53..9ac409ad906b 100644 --- a/arch/microblaze/include/asm/io.h +++ b/arch/microblaze/include/asm/io.h @@ -16,6 +16,7 @@ #include #include /* Get struct page {...} */ +#define PCI_DRAM_OFFSET 0 #define IO_SPACE_LIMIT (0xFFFFFFFF) diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile index b07594eccf9b..e51bc1520825 100644 --- a/arch/microblaze/kernel/Makefile +++ b/arch/microblaze/kernel/Makefile @@ -14,7 +14,7 @@ endif extra-y := head.o vmlinux.lds -obj-y += exceptions.o \ +obj-y += dma.o exceptions.o \ hw_exception_handler.o init_task.o intc.o irq.o of_device.o \ of_platform.o process.o prom.o prom_parse.o ptrace.o \ setup.o signal.o sys_microblaze.o timer.o traps.o reset.o diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c new file mode 100644 index 000000000000..300fea46737e --- /dev/null +++ b/arch/microblaze/kernel/dma.c @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2009-2010 PetaLogix + * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation + * + * Provide default implementations of the DMA mapping callbacks for + * directly mapped busses. 
+ */ + +#include +#include +#include +#include + +/* + * Generic direct DMA implementation + * + * This implementation supports a per-device offset that can be applied if + * the address at which memory is visible to devices is not 0. Platform code + * can set archdata.dma_data to an unsigned long holding the offset. By + * default the offset is PCI_DRAM_OFFSET. + */ + +static unsigned long get_dma_direct_offset(struct device *dev) +{ + if (dev) + return (unsigned long)dev->archdata.dma_data; + + return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */ +} + +void *dma_direct_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret; + struct page *page; + int node = dev_to_node(dev); + + /* ignore region specifiers */ + flag &= ~(__GFP_HIGHMEM); + + page = alloc_pages_node(node, flag, get_order(size)); + if (page == NULL) + return NULL; + ret = page_address(page); + memset(ret, 0, size); + *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev); + + return ret; +} + +void dma_direct_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + free_pages((unsigned long)vaddr, get_order(size)); +} + +static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction direction, + struct dma_attrs *attrs) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) { + sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev); + sg->dma_length = sg->length; + __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); + } + + return nents; +} + +static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction direction, + struct dma_attrs *attrs) +{ +} + +static int dma_direct_dma_supported(struct device *dev, u64 mask) +{ + return 1; +} + +static inline dma_addr_t dma_direct_map_page(struct device *dev, + struct page *page, + unsigned long offset, + size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + BUG_ON(dir == DMA_NONE); + __dma_sync_page(page, offset, size, dir); + return page_to_phys(page) + offset + get_dma_direct_offset(dev); +} + +static inline void dma_direct_unmap_page(struct device *dev, + dma_addr_t dma_address, + size_t size, + enum dma_data_direction direction, + struct dma_attrs *attrs) +{ +} + +struct dma_map_ops dma_direct_ops = { + .alloc_coherent = dma_direct_alloc_coherent, + .free_coherent = dma_direct_free_coherent, + .map_sg = dma_direct_map_sg, + .unmap_sg = dma_direct_unmap_sg, + .dma_supported = dma_direct_dma_supported, + .map_page = dma_direct_map_page, + .unmap_page = dma_direct_unmap_page, +}; +EXPORT_SYMBOL(dma_direct_ops); + +/* Number of entries preallocated for DMA-API debugging */ +#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) + +static int __init dma_init(void) +{ + dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); + + return 0; +} +fs_initcall(dma_init); diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index bb8c4b9ccb80..bc325ac4efd3 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -23,6 +23,8 @@ #include #include #include +#include +#include #include #include #include @@ -188,3 +190,37 @@ static int microblaze_debugfs_init(void) } arch_initcall(microblaze_debugfs_init); #endif + +static int dflt_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + + /* We are only intereted in device addition */ + if (action != BUS_NOTIFY_ADD_DEVICE) + 
return 0; + + set_dma_ops(dev, &dma_direct_ops); + + return NOTIFY_DONE; +} + +static struct notifier_block dflt_plat_bus_notifier = { + .notifier_call = dflt_bus_notify, + .priority = INT_MAX, +}; + +static struct notifier_block dflt_of_bus_notifier = { + .notifier_call = dflt_bus_notify, + .priority = INT_MAX, +}; + +static int __init setup_bus_notifier(void) +{ + bus_register_notifier(&platform_bus_type, &dflt_plat_bus_notifier); + bus_register_notifier(&of_platform_bus_type, &dflt_of_bus_notifier); + + return 0; +} + +arch_initcall(setup_bus_notifier); -- cgit v1.2.3 From 2549edd353196d7de9c18e08146d7a8836f97235 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Wed, 20 Jan 2010 14:36:24 +0100 Subject: microblaze: Implement __dma_sync_page There is necessary to do some cache handling for dma operations. Signed-off-by: Michal Simek --- arch/microblaze/include/asm/dma-mapping.h | 1 - arch/microblaze/kernel/dma.c | 26 +++++++++++++++++++++++--- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 096fa96ee736..18b3731c8509 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -34,7 +34,6 @@ #define __dma_alloc_coherent(dev, gfp, size, handle) NULL #define __dma_free_coherent(size, addr) ((void)0) #define __dma_sync(addr, size, rw) ((void)0) -#define __dma_sync_page(pg, off, sz, rw) ((void)0) static inline unsigned long device_to_mask(struct device *dev) { diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index 300fea46737e..64bc39f40ba7 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -10,6 +10,7 @@ #include #include #include +#include /* * Generic direct DMA implementation @@ -20,6 +21,23 @@ * default the offset is PCI_DRAM_OFFSET. 
*/ +static inline void __dma_sync_page(void *vaddr, unsigned long offset, + size_t size, enum dma_data_direction direction) +{ + unsigned long start = virt_to_phys(vaddr); + + switch (direction) { + case DMA_TO_DEVICE: + flush_dcache_range(start + offset, start + offset + size); + break; + case DMA_FROM_DEVICE: + invalidate_dcache_range(start + offset, start + offset + size); + break; + default: + BUG(); + } +} + static unsigned long get_dma_direct_offset(struct device *dev) { if (dev) @@ -85,11 +103,11 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, - enum dma_data_direction dir, + enum dma_data_direction direction, struct dma_attrs *attrs) { - BUG_ON(dir == DMA_NONE); - __dma_sync_page(page, offset, size, dir); + BUG_ON(direction == DMA_NONE); + __dma_sync_page(page, offset, size, direction); return page_to_phys(page) + offset + get_dma_direct_offset(dev); } @@ -99,6 +117,8 @@ static inline void dma_direct_unmap_page(struct device *dev, enum dma_data_direction direction, struct dma_attrs *attrs) { +/* There is not necessary to do cache cleanup */ + /* __dma_sync_page(dma_address, 0 , size, direction); */ } struct dma_map_ops dma_direct_ops = { -- cgit v1.2.3 From a84642a339235020e6dccc022de27055f1fa9340 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 14 Jan 2010 17:03:49 +0100 Subject: microblaze: Add {z,}alloc_maybe_bootmem functions I will need {z,}alloc_maybe_bootmem functions for pci patches Signed-off-by: Michal Simek --- arch/microblaze/include/asm/system.h | 3 +++ arch/microblaze/mm/init.c | 23 +++++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h index 157970688b2a..59efb3fef957 100644 --- a/arch/microblaze/include/asm/system.h +++ b/arch/microblaze/include/asm/system.h @@ -87,6 +87,9 @@ void free_initmem(void); extern char *klimit; extern void ret_from_fork(void); +extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); +extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); + #ifdef CONFIG_DEBUG_FS extern struct dentry *of_debugfs_root; #endif diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index a57cedf36715..6eea5544ad8b 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -349,4 +349,27 @@ void __init *early_get_page(void) } return p; } + +void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask) +{ + if (mem_init_done) + return kmalloc(size, mask); + else + return alloc_bootmem(size); +} + +void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask) +{ + void *p; + + if (mem_init_done) + p = kzalloc(size, mask); + else { + p = alloc_bootmem(size); + if (p) + memset(p, 0, size); + } + return p; +} + #endif /* CONFIG_MMU */ -- cgit v1.2.3 From c6ba01a4c7806d134c8d483525997559071d0990 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 14 Jan 2010 15:16:31 +0100 Subject: microblaze: Add irq_create_{of_,}mapping functions Support function for PCI. We don't use any advance mapping mechanism that's why implementation is simple. 
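As a hedged sketch (not part of this patch): because the mapping is the identity, a driver can take the hardware interrupt number straight from the device tree and request it as a Linux irq. The names my_isr and my_setup_irq are invented for the example:

    #include <linux/errno.h>
    #include <linux/interrupt.h>
    #include <linux/of.h>
    #include <asm/irq.h>

    static irqreturn_t my_isr(int irq, void *dev_id)
    {
            /* handle the device interrupt here */
            return IRQ_HANDLED;
    }

    static int my_setup_irq(struct device_node *np, void *dev_id)
    {
            /* With the 1:1 mapping, the virq returned here is simply the
             * hardware irq number from the device tree. */
            unsigned int virq = irq_of_parse_and_map(np, 0);

            if (virq == NO_IRQ)
                    return -ENODEV;

            return request_irq(virq, my_isr, 0, "my-device", dev_id);
    }
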
Signed-off-by: Michal Simek --- arch/microblaze/include/asm/irq.h | 37 ++++++++++++++++++++++++++++++++++++- arch/microblaze/kernel/irq.c | 15 +++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h index 90f050535ebe..31a35c33df63 100644 --- a/arch/microblaze/include/asm/irq.h +++ b/arch/microblaze/include/asm/irq.h @@ -14,6 +14,12 @@ #include +/* This type is the placeholder for a hardware interrupt number. It has to + * be big enough to enclose whatever representation is used by a given + * platform. + */ +typedef unsigned long irq_hw_number_t; + extern unsigned int nr_irq; #define NO_IRQ (-1) @@ -21,7 +27,8 @@ extern unsigned int nr_irq; struct pt_regs; extern void do_IRQ(struct pt_regs *regs); -/* irq_of_parse_and_map - Parse and Map an interrupt into linux virq space +/** + * irq_of_parse_and_map - Parse and Map an interrupt into linux virq space * @device: Device node of the device whose interrupt is to be mapped * @index: Index of the interrupt to map * @@ -40,4 +47,32 @@ static inline void irq_dispose_mapping(unsigned int virq) return; } +struct irq_host; + +/** + * irq_create_mapping - Map a hardware interrupt into linux virq space + * @host: host owning this hardware interrupt or NULL for default host + * @hwirq: hardware irq number in that host space + * + * Only one mapping per hardware interrupt is permitted. Returns a linux + * virq number. + * If the sense/trigger is to be specified, set_irq_type() should be called + * on the number returned from that call. + */ +extern unsigned int irq_create_mapping(struct irq_host *host, + irq_hw_number_t hwirq); + +/** + * irq_create_of_mapping - Map a hardware interrupt into linux virq space + * @controller: Device node of the interrupt controller + * @inspec: Interrupt specifier from the device-tree + * @intsize: Size of the interrupt specifier from the device-tree + * + * This function is identical to irq_create_mapping except that it takes + * as input informations straight from the device-tree (typically the results + * of the of_irq_map_*() functions. + */ +extern unsigned int irq_create_of_mapping(struct device_node *controller, + u32 *intspec, unsigned int intsize); + #endif /* _ASM_MICROBLAZE_IRQ_H */ diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c index 0f06034d1fe0..6f39e2c001f3 100644 --- a/arch/microblaze/kernel/irq.c +++ b/arch/microblaze/kernel/irq.c @@ -93,3 +93,18 @@ skip: } return 0; } + +/* MS: There is no any advance mapping mechanism. We are using simple 32bit + intc without any cascades or any connection that's why mapping is 1:1 */ +unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq) +{ + return hwirq; +} +EXPORT_SYMBOL_GPL(irq_create_mapping); + +unsigned int irq_create_of_mapping(struct device_node *controller, + u32 *intspec, unsigned int intsize) +{ + return intspec[0]; +} +EXPORT_SYMBOL_GPL(irq_create_of_mapping); -- cgit v1.2.3 From 2ddafeab6f159640299d17fb9b73b57f65011d85 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 14 Jan 2010 15:40:11 +0100 Subject: microblaze: io.h include asm-generic/iomap.h I need to use generic/iomap.h for PCI that's why is necessary to include it and fix ioport_{map,unmap} functions. 
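A minimal sketch of what this enables (the port base and register offset below are made up for illustration): with ioport_map() reduced to a cast, port ranges can be accessed through the ioread/iowrite accessors that asm-generic/iomap.h provides:

    #include <linux/types.h>
    #include <linux/io.h>

    #define MY_PORT_BASE 0x3f8      /* hypothetical I/O port base */

    static u8 my_read_status(void)
    {
            void __iomem *regs = ioport_map(MY_PORT_BASE, 8);
            u8 val;

            if (!regs)
                    return 0;
            val = ioread8(regs + 5);        /* status register at offset 5 */
            ioport_unmap(regs);
            return val;
    }
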
Signed-off-by: Michal Simek --- arch/microblaze/include/asm/io.h | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h index 9ac409ad906b..f82df5d221a8 100644 --- a/arch/microblaze/include/asm/io.h +++ b/arch/microblaze/include/asm/io.h @@ -15,6 +15,7 @@ #include #include #include /* Get struct page {...} */ +#include #define PCI_DRAM_OFFSET 0 @@ -228,15 +229,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size, #define out_8(a, v) __raw_writeb((v), (a)) #define in_8(a) __raw_readb(a) -/* FIXME */ -static inline void __iomem *ioport_map(unsigned long port, unsigned int len) -{ - return (void __iomem *) (port); -} - -static inline void ioport_unmap(void __iomem *addr) -{ - /* Nothing to do */ -} +#define ioport_map(port, nr) ((void __iomem *)(port)) +#define ioport_unmap(addr) #endif /* _ASM_MICROBLAZE_IO_H */ -- cgit v1.2.3 From 830980a0a83e0b5af97c31f24dc7f1b57aa9ccea Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 18 Jan 2010 14:23:16 +0100 Subject: microblaze: Add pci.h Add pci.h for microblaze. It is based on powerpc pci.h Signed-off-by: Michal Simek --- arch/microblaze/include/asm/pci.h | 171 +++++++++++++++++++++++++++++++++++++- 1 file changed, 170 insertions(+), 1 deletion(-) diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index 9f0df5faf2c8..fecc04481101 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h @@ -1 +1,170 @@ -#include +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Based on powerpc version + */ + +#ifndef __ASM_MICROBLAZE_PCI_H +#define __ASM_MICROBLAZE_PCI_H +#ifdef __KERNEL__ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define PCIBIOS_MIN_IO 0x1000 +#define PCIBIOS_MIN_MEM 0x10000000 + +struct pci_dev; + +/* Values for the `which' argument to sys_pciconfig_iobase syscall. */ +#define IOBASE_BRIDGE_NUMBER 0 +#define IOBASE_MEMORY 1 +#define IOBASE_IO 2 +#define IOBASE_ISA_IO 3 +#define IOBASE_ISA_MEM 4 + +#define pcibios_scan_all_fns(a, b) 0 + +/* + * Set this to 1 if you want the kernel to re-assign all PCI + * bus numbers (don't do that on ppc64 yet !) 
+ */ +#define pcibios_assign_all_busses() \ + (pci_has_flag(PCI_REASSIGN_ALL_BUS)) + +static inline void pcibios_set_master(struct pci_dev *dev) +{ + /* No special bus mastering setup handling */ +} + +static inline void pcibios_penalize_isa_irq(int irq, int active) +{ + /* We don't do dynamic PCI IRQ allocation */ +} + +#ifdef CONFIG_PCI +extern void set_pci_dma_ops(struct dma_map_ops *dma_ops); +extern struct dma_map_ops *get_pci_dma_ops(void); +#else /* CONFIG_PCI */ +#define set_pci_dma_ops(d) +#define get_pci_dma_ops() NULL +#endif + +#ifdef CONFIG_PCI +static inline void pci_dma_burst_advice(struct pci_dev *pdev, + enum pci_dma_burst_strategy *strat, + unsigned long *strategy_parameter) +{ + *strat = PCI_DMA_BURST_INFINITY; + *strategy_parameter = ~0UL; +} +#endif + +extern int pci_domain_nr(struct pci_bus *bus); + +/* Decide whether to display the domain number in /proc */ +extern int pci_proc_domain(struct pci_bus *bus); + +struct vm_area_struct; +/* Map a range of PCI memory or I/O space for a device into user space */ +int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine); + +/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ +#define HAVE_PCI_MMAP 1 + +extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, + size_t count); +extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, + size_t count); +extern int pci_mmap_legacy_page_range(struct pci_bus *bus, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_state); + +#define HAVE_PCI_LEGACY 1 + +/* pci_unmap_{page,single} is a nop so... */ +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) +#define pci_unmap_addr(PTR, ADDR_NAME) (0) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) +#define pci_unmap_len(PTR, LEN_NAME) (0) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) + +/* The PCI address space does equal the physical memory + * address space (no IOMMU). The IDE and SCSI device layers use + * this boolean for bounce buffer decisions. 
+ */ +#define PCI_DMA_BUS_IS_PHYS (1) + +extern void pcibios_resource_to_bus(struct pci_dev *dev, + struct pci_bus_region *region, + struct resource *res); + +extern void pcibios_bus_to_resource(struct pci_dev *dev, + struct resource *res, + struct pci_bus_region *region); + +static inline struct resource *pcibios_select_root(struct pci_dev *pdev, + struct resource *res) +{ + struct resource *root = NULL; + + if (res->flags & IORESOURCE_IO) + root = &ioport_resource; + if (res->flags & IORESOURCE_MEM) + root = &iomem_resource; + + return root; +} + +extern void pcibios_claim_one_bus(struct pci_bus *b); + +extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); + +extern void pcibios_resource_survey(void); + +extern struct pci_controller *init_phb_dynamic(struct device_node *dn); +extern int remove_phb_dynamic(struct pci_controller *phb); + +extern struct pci_dev *of_create_pci_dev(struct device_node *node, + struct pci_bus *bus, int devfn); + +extern void of_scan_pci_bridge(struct device_node *node, + struct pci_dev *dev); + +extern void of_scan_bus(struct device_node *node, struct pci_bus *bus); +extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus); + +extern int pci_read_irq_line(struct pci_dev *dev); + +extern int pci_bus_find_capability(struct pci_bus *bus, + unsigned int devfn, int cap); + +struct file; +extern pgprot_t pci_phys_mem_access_prot(struct file *file, + unsigned long pfn, + unsigned long size, + pgprot_t prot); + +#define HAVE_ARCH_PCI_RESOURCE_TO_USER +extern void pci_resource_to_user(const struct pci_dev *dev, int bar, + const struct resource *rsrc, + resource_size_t *start, resource_size_t *end); + +extern void pcibios_setup_bus_devices(struct pci_bus *bus); +extern void pcibios_setup_bus_self(struct pci_bus *bus); + +#endif /* __KERNEL__ */ +#endif /* __ASM_MICROBLAZE_PCI_H */ -- cgit v1.2.3 From 2ed975b43c1bf09652795a355c4e820e21c4ec44 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 18 Jan 2010 14:25:08 +0100 Subject: microblaze: Add pci-bridge.h Add pci-bridge.h for Microblaze. It is based on powerpc header file. My changes: I removed PPC_ prefix from constants Removed ppc64 specifis parts Signed-off-by: Michal Simek --- arch/microblaze/include/asm/pci-bridge.h | 195 +++++++++++++++++++++++++++++++ 1 file changed, 195 insertions(+) diff --git a/arch/microblaze/include/asm/pci-bridge.h b/arch/microblaze/include/asm/pci-bridge.h index 7ad28f6f5f1a..0c77cda9f5d8 100644 --- a/arch/microblaze/include/asm/pci-bridge.h +++ b/arch/microblaze/include/asm/pci-bridge.h @@ -1 +1,196 @@ +#ifndef _ASM_MICROBLAZE_PCI_BRIDGE_H +#define _ASM_MICROBLAZE_PCI_BRIDGE_H +#ifdef __KERNEL__ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ #include +#include +#include + +struct device_node; + +enum { + /* Force re-assigning all resources (ignore firmware + * setup completely) + */ + PCI_REASSIGN_ALL_RSRC = 0x00000001, + + /* Re-assign all bus numbers */ + PCI_REASSIGN_ALL_BUS = 0x00000002, + + /* Do not try to assign, just use existing setup */ + PCI_PROBE_ONLY = 0x00000004, + + /* Don't bother with ISA alignment unless the bridge has + * ISA forwarding enabled + */ + PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, + + /* Enable domain numbers in /proc */ + PCI_ENABLE_PROC_DOMAINS = 0x00000010, + /* ... 
except for domain 0 */ + PCI_COMPAT_DOMAIN_0 = 0x00000020, +}; + +/* + * Structure of a PCI controller (host bridge) + */ +struct pci_controller { + struct pci_bus *bus; + char is_dynamic; + struct device_node *dn; + struct list_head list_node; + struct device *parent; + + int first_busno; + int last_busno; + + int self_busno; + + void __iomem *io_base_virt; + resource_size_t io_base_phys; + + resource_size_t pci_io_size; + + /* Some machines (PReP) have a non 1:1 mapping of + * the PCI memory space in the CPU bus space + */ + resource_size_t pci_mem_offset; + + /* Some machines have a special region to forward the ISA + * "memory" cycles such as VGA memory regions. Left to 0 + * if unsupported + */ + resource_size_t isa_mem_phys; + resource_size_t isa_mem_size; + + struct pci_ops *ops; + unsigned int __iomem *cfg_addr; + void __iomem *cfg_data; + + /* + * Used for variants of PCI indirect handling and possible quirks: + * SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1 + * EXT_REG - provides access to PCI-e extended registers + * SURPRESS_PRIMARY_BUS - we surpress the setting of PCI_PRIMARY_BUS + * on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS + * to determine which bus number to match on when generating type0 + * config cycles + * NO_PCIE_LINK - the Freescale PCI-e controllers have issues with + * hanging if we don't have link and try to do config cycles to + * anything but the PHB. Only allow talking to the PHB if this is + * set. + * BIG_ENDIAN - cfg_addr is a big endian register + * BROKEN_MRM - the 440EPx/GRx chips have an errata that causes hangs + * on the PLB4. Effectively disable MRM commands by setting this. + */ +#define INDIRECT_TYPE_SET_CFG_TYPE 0x00000001 +#define INDIRECT_TYPE_EXT_REG 0x00000002 +#define INDIRECT_TYPE_SURPRESS_PRIMARY_BUS 0x00000004 +#define INDIRECT_TYPE_NO_PCIE_LINK 0x00000008 +#define INDIRECT_TYPE_BIG_ENDIAN 0x00000010 +#define INDIRECT_TYPE_BROKEN_MRM 0x00000020 + u32 indirect_type; + + /* Currently, we limit ourselves to 1 IO range and 3 mem + * ranges since the common pci_bus structure can't handle more + */ + struct resource io_resource; + struct resource mem_resources[3]; + int global_number; /* PCI domain number */ +}; + +static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) +{ + return bus->sysdata; +} + +static inline int isa_vaddr_is_ioport(void __iomem *address) +{ + /* No specific ISA handling on ppc32 at this stage, it + * all goes through PCI + */ + return 0; +} + +/* These are used for config access before all the PCI probing + has been done. 
*/ +extern int early_read_config_byte(struct pci_controller *hose, int bus, + int dev_fn, int where, u8 *val); +extern int early_read_config_word(struct pci_controller *hose, int bus, + int dev_fn, int where, u16 *val); +extern int early_read_config_dword(struct pci_controller *hose, int bus, + int dev_fn, int where, u32 *val); +extern int early_write_config_byte(struct pci_controller *hose, int bus, + int dev_fn, int where, u8 val); +extern int early_write_config_word(struct pci_controller *hose, int bus, + int dev_fn, int where, u16 val); +extern int early_write_config_dword(struct pci_controller *hose, int bus, + int dev_fn, int where, u32 val); + +extern int early_find_capability(struct pci_controller *hose, int bus, + int dev_fn, int cap); + +extern void setup_indirect_pci(struct pci_controller *hose, + resource_size_t cfg_addr, + resource_size_t cfg_data, u32 flags); + +/* Get the PCI host controller for an OF device */ +extern struct pci_controller *pci_find_hose_for_OF_device( + struct device_node *node); + +/* Fill up host controller resources from the OF node */ +extern void pci_process_bridge_OF_ranges(struct pci_controller *hose, + struct device_node *dev, int primary); + +/* Allocate & free a PCI host bridge structure */ +extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev); +extern void pcibios_free_controller(struct pci_controller *phb); +extern void pcibios_setup_phb_resources(struct pci_controller *hose); + +#ifdef CONFIG_PCI +extern unsigned int pci_flags; + +static inline void pci_set_flags(int flags) +{ + pci_flags = flags; +} + +static inline void pci_add_flags(int flags) +{ + pci_flags |= flags; +} + +static inline int pci_has_flag(int flag) +{ + return pci_flags & flag; +} + +extern struct list_head hose_list; + +extern unsigned long pci_address_to_pio(phys_addr_t address); +extern int pcibios_vaddr_is_ioport(void __iomem *address); +#else +static inline unsigned long pci_address_to_pio(phys_addr_t address) +{ + return (unsigned long)-1; +} +static inline int pcibios_vaddr_is_ioport(void __iomem *address) +{ + return 0; +} + +static inline void pci_set_flags(int flags) { } +static inline void pci_add_flags(int flags) { } +static inline int pci_has_flag(int flag) +{ + return 0; +} +#endif /* CONFIG_PCI */ + +#endif /* __KERNEL__ */ +#endif /* _ASM_MICROBLAZE_PCI_BRIDGE_H */ -- cgit v1.2.3 From d3afa58c20b65155af9f0d5eaa59fe2d367ac432 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 18 Jan 2010 14:42:34 +0100 Subject: microblaze: Add core PCI files Add pci-common.h and pci32.c. Files are based on PPC version. There are removed ppc specific parts and the code was completely clean. Signed-off-by: Michal Simek --- arch/microblaze/pci/pci-common.c | 1640 ++++++++++++++++++++++++++++++++++++++ arch/microblaze/pci/pci_32.c | 430 ++++++++++ 2 files changed, 2070 insertions(+) create mode 100644 arch/microblaze/pci/pci-common.c create mode 100644 arch/microblaze/pci/pci_32.c diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c new file mode 100644 index 000000000000..f03f8be2740a --- /dev/null +++ b/arch/microblaze/pci/pci-common.c @@ -0,0 +1,1640 @@ +/* + * Contains common pci routines for ALL ppc platform + * (based on pci_32.c and pci_64.c) + * + * Port for PPC64 David Engebretsen, IBM Corp. + * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. + * + * Copyright (C) 2003 Anton Blanchard , IBM + * Rework, based on alpha PCI code. + * + * Common pmac/prep/chrp pci routines. 
-- Cort + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +static DEFINE_SPINLOCK(hose_spinlock); +LIST_HEAD(hose_list); + +/* XXX kill that some day ... */ +static int global_phb_number; /* Global phb counter */ + +/* ISA Memory physical address */ +resource_size_t isa_mem_base; + +/* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */ +unsigned int pci_flags; + +static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; + +void set_pci_dma_ops(struct dma_map_ops *dma_ops) +{ + pci_dma_ops = dma_ops; +} + +struct dma_map_ops *get_pci_dma_ops(void) +{ + return pci_dma_ops; +} +EXPORT_SYMBOL(get_pci_dma_ops); + +int pci_set_dma_mask(struct pci_dev *dev, u64 mask) +{ + return dma_set_mask(&dev->dev, mask); +} + +int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) +{ + int rc; + + rc = dma_set_mask(&dev->dev, mask); + dev->dev.coherent_dma_mask = dev->dma_mask; + + return rc; +} + +struct pci_controller *pcibios_alloc_controller(struct device_node *dev) +{ + struct pci_controller *phb; + + phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL); + if (!phb) + return NULL; + spin_lock(&hose_spinlock); + phb->global_number = global_phb_number++; + list_add_tail(&phb->list_node, &hose_list); + spin_unlock(&hose_spinlock); + phb->dn = dev; + phb->is_dynamic = mem_init_done; + return phb; +} + +void pcibios_free_controller(struct pci_controller *phb) +{ + spin_lock(&hose_spinlock); + list_del(&phb->list_node); + spin_unlock(&hose_spinlock); + + if (phb->is_dynamic) + kfree(phb); +} + +static resource_size_t pcibios_io_size(const struct pci_controller *hose) +{ + return hose->io_resource.end - hose->io_resource.start + 1; +} + +int pcibios_vaddr_is_ioport(void __iomem *address) +{ + int ret = 0; + struct pci_controller *hose; + resource_size_t size; + + spin_lock(&hose_spinlock); + list_for_each_entry(hose, &hose_list, list_node) { + size = pcibios_io_size(hose); + if (address >= hose->io_base_virt && + address < (hose->io_base_virt + size)) { + ret = 1; + break; + } + } + spin_unlock(&hose_spinlock); + return ret; +} + +unsigned long pci_address_to_pio(phys_addr_t address) +{ + struct pci_controller *hose; + resource_size_t size; + unsigned long ret = ~0; + + spin_lock(&hose_spinlock); + list_for_each_entry(hose, &hose_list, list_node) { + size = pcibios_io_size(hose); + if (address >= hose->io_base_phys && + address < (hose->io_base_phys + size)) { + unsigned long base = + (unsigned long)hose->io_base_virt - _IO_BASE; + ret = base + (address - hose->io_base_phys); + break; + } + } + spin_unlock(&hose_spinlock); + + return ret; +} +EXPORT_SYMBOL_GPL(pci_address_to_pio); + +/* + * Return the domain number for this bus. + */ +int pci_domain_nr(struct pci_bus *bus) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + + return hose->global_number; +} +EXPORT_SYMBOL(pci_domain_nr); + +/* This routine is meant to be used early during boot, when the + * PCI bus numbers have not yet been assigned, and you need to + * issue PCI config cycles to an OF device. 
+ * It could also be used to "fix" RTAS config cycles if you want + * to set pci_assign_all_buses to 1 and still use RTAS for PCI + * config cycles. + */ +struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node) +{ + while (node) { + struct pci_controller *hose, *tmp; + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) + if (hose->dn == node) + return hose; + node = node->parent; + } + return NULL; +} + +static ssize_t pci_show_devspec(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pdev; + struct device_node *np; + + pdev = to_pci_dev(dev); + np = pci_device_to_OF_node(pdev); + if (np == NULL || np->full_name == NULL) + return 0; + return sprintf(buf, "%s", np->full_name); +} +static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL); + +/* Add sysfs properties */ +int pcibios_add_platform_entries(struct pci_dev *pdev) +{ + return device_create_file(&pdev->dev, &dev_attr_devspec); +} + +char __devinit *pcibios_setup(char *str) +{ + return str; +} + +/* + * Reads the interrupt pin to determine if interrupt is use by card. + * If the interrupt is used, then gets the interrupt line from the + * openfirmware and sets it in the pci_dev and pci_config line. + */ +int pci_read_irq_line(struct pci_dev *pci_dev) +{ + struct of_irq oirq; + unsigned int virq; + + /* The current device-tree that iSeries generates from the HV + * PCI informations doesn't contain proper interrupt routing, + * and all the fallback would do is print out crap, so we + * don't attempt to resolve the interrupts here at all, some + * iSeries specific fixup does it. + * + * In the long run, we will hopefully fix the generated device-tree + * instead. + */ + pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev)); + +#ifdef DEBUG + memset(&oirq, 0xff, sizeof(oirq)); +#endif + /* Try to get a mapping from the device-tree */ + if (of_irq_map_pci(pci_dev, &oirq)) { + u8 line, pin; + + /* If that fails, lets fallback to what is in the config + * space and map that through the default controller. We + * also set the type to level low since that's what PCI + * interrupts are. If your platform does differently, then + * either provide a proper interrupt tree or don't use this + * function. + */ + if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin)) + return -1; + if (pin == 0) + return -1; + if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) || + line == 0xff || line == 0) { + return -1; + } + pr_debug(" No map ! Using line %d (pin %d) from PCI config\n", + line, pin); + + virq = irq_create_mapping(NULL, line); + if (virq != NO_IRQ) + set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); + } else { + pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", + oirq.size, oirq.specifier[0], oirq.specifier[1], + oirq.controller ? oirq.controller->full_name : + ""); + + virq = irq_create_of_mapping(oirq.controller, oirq.specifier, + oirq.size); + } + if (virq == NO_IRQ) { + pr_debug(" Failed to map !\n"); + return -1; + } + + pr_debug(" Mapped to linux irq %d\n", virq); + + pci_dev->irq = virq; + + return 0; +} +EXPORT_SYMBOL(pci_read_irq_line); + +/* + * Platform support for /proc/bus/pci/X/Y mmap()s, + * modelled on the sparc64 implementation by Dave Miller. + * -- paulus. + */ + +/* + * Adjust vm_pgoff of VMA such that it is the physical page offset + * corresponding to the 32-bit pci bus offset for DEV requested by the user. + * + * Basically, the user finds the base address for his device which he wishes + * to mmap. 
They read the 32-bit value from the config space base register, + * add whatever PAGE_SIZE multiple offset they wish, and feed this into the + * offset parameter of mmap on /proc/bus/pci/XXX for that device. + * + * Returns negative error code on failure, zero on success. + */ +static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, + resource_size_t *offset, + enum pci_mmap_state mmap_state) +{ + struct pci_controller *hose = pci_bus_to_host(dev->bus); + unsigned long io_offset = 0; + int i, res_bit; + + if (hose == 0) + return NULL; /* should never happen */ + + /* If memory, add on the PCI bridge address offset */ + if (mmap_state == pci_mmap_mem) { +#if 0 /* See comment in pci_resource_to_user() for why this is disabled */ + *offset += hose->pci_mem_offset; +#endif + res_bit = IORESOURCE_MEM; + } else { + io_offset = (unsigned long)hose->io_base_virt - _IO_BASE; + *offset += io_offset; + res_bit = IORESOURCE_IO; + } + + /* + * Check that the offset requested corresponds to one of the + * resources of the device. + */ + for (i = 0; i <= PCI_ROM_RESOURCE; i++) { + struct resource *rp = &dev->resource[i]; + int flags = rp->flags; + + /* treat ROM as memory (should be already) */ + if (i == PCI_ROM_RESOURCE) + flags |= IORESOURCE_MEM; + + /* Active and same type? */ + if ((flags & res_bit) == 0) + continue; + + /* In the range of this resource? */ + if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end) + continue; + + /* found it! construct the final physical address */ + if (mmap_state == pci_mmap_io) + *offset += hose->io_base_phys - io_offset; + return rp; + } + + return NULL; +} + +/* + * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci + * device mapping. + */ +static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, + pgprot_t protection, + enum pci_mmap_state mmap_state, + int write_combine) +{ + pgprot_t prot = protection; + + /* Write combine is always 0 on non-memory space mappings. On + * memory space, if the user didn't pass 1, we check for a + * "prefetchable" resource. This is a bit hackish, but we use + * this to workaround the inability of /sysfs to provide a write + * combine bit + */ + if (mmap_state != pci_mmap_mem) + write_combine = 0; + else if (write_combine == 0) { + if (rp->flags & IORESOURCE_PREFETCH) + write_combine = 1; + } + + return pgprot_noncached(prot); +} + +/* + * This one is used by /dev/mem and fbdev who have no clue about the + * PCI device, it tries to find the PCI device first and calls the + * above routine + */ +pgprot_t pci_phys_mem_access_prot(struct file *file, + unsigned long pfn, + unsigned long size, + pgprot_t prot) +{ + struct pci_dev *pdev = NULL; + struct resource *found = NULL; + resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT; + int i; + + if (page_is_ram(pfn)) + return prot; + + prot = pgprot_noncached(prot); + for_each_pci_dev(pdev) { + for (i = 0; i <= PCI_ROM_RESOURCE; i++) { + struct resource *rp = &pdev->resource[i]; + int flags = rp->flags; + + /* Active and same type? */ + if ((flags & IORESOURCE_MEM) == 0) + continue; + /* In the range of this resource? 
*/ + if (offset < (rp->start & PAGE_MASK) || + offset > rp->end) + continue; + found = rp; + break; + } + if (found) + break; + } + if (found) { + if (found->flags & IORESOURCE_PREFETCH) + prot = pgprot_noncached_wc(prot); + pci_dev_put(pdev); + } + + pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n", + (unsigned long long)offset, pgprot_val(prot)); + + return prot; +} + +/* + * Perform the actual remap of the pages for a PCI device mapping, as + * appropriate for this architecture. The region in the process to map + * is described by vm_start and vm_end members of VMA, the base physical + * address is found in vm_pgoff. + * The pci device structure is provided so that architectures may make mapping + * decisions on a per-device or per-bus basis. + * + * Returns a negative error code on failure, zero on success. + */ +int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine) +{ + resource_size_t offset = + ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT; + struct resource *rp; + int ret; + + rp = __pci_mmap_make_offset(dev, &offset, mmap_state); + if (rp == NULL) + return -EINVAL; + + vma->vm_pgoff = offset >> PAGE_SHIFT; + vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp, + vma->vm_page_prot, + mmap_state, write_combine); + + ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + + return ret; +} + +/* This provides legacy IO read access on a bus */ +int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size) +{ + unsigned long offset; + struct pci_controller *hose = pci_bus_to_host(bus); + struct resource *rp = &hose->io_resource; + void __iomem *addr; + + /* Check if port can be supported by that bus. We only check + * the ranges of the PHB though, not the bus itself as the rules + * for forwarding legacy cycles down bridges are not our problem + * here. So if the host bridge supports it, we do it. + */ + offset = (unsigned long)hose->io_base_virt - _IO_BASE; + offset += port; + + if (!(rp->flags & IORESOURCE_IO)) + return -ENXIO; + if (offset < rp->start || (offset + size) > rp->end) + return -ENXIO; + addr = hose->io_base_virt + port; + + switch (size) { + case 1: + *((u8 *)val) = in_8(addr); + return 1; + case 2: + if (port & 1) + return -EINVAL; + *((u16 *)val) = in_le16(addr); + return 2; + case 4: + if (port & 3) + return -EINVAL; + *((u32 *)val) = in_le32(addr); + return 4; + } + return -EINVAL; +} + +/* This provides legacy IO write access on a bus */ +int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size) +{ + unsigned long offset; + struct pci_controller *hose = pci_bus_to_host(bus); + struct resource *rp = &hose->io_resource; + void __iomem *addr; + + /* Check if port can be supported by that bus. We only check + * the ranges of the PHB though, not the bus itself as the rules + * for forwarding legacy cycles down bridges are not our problem + * here. So if the host bridge supports it, we do it. + */ + offset = (unsigned long)hose->io_base_virt - _IO_BASE; + offset += port; + + if (!(rp->flags & IORESOURCE_IO)) + return -ENXIO; + if (offset < rp->start || (offset + size) > rp->end) + return -ENXIO; + addr = hose->io_base_virt + port; + + /* WARNING: The generic code is idiotic. 
It gets passed a pointer + * to what can be a 1, 2 or 4 byte quantity and always reads that + * as a u32, which means that we have to correct the location of + * the data read within those 32 bits for size 1 and 2 + */ + switch (size) { + case 1: + out_8(addr, val >> 24); + return 1; + case 2: + if (port & 1) + return -EINVAL; + out_le16(addr, val >> 16); + return 2; + case 4: + if (port & 3) + return -EINVAL; + out_le32(addr, val); + return 4; + } + return -EINVAL; +} + +/* This provides legacy IO or memory mmap access on a bus */ +int pci_mmap_legacy_page_range(struct pci_bus *bus, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_state) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + resource_size_t offset = + ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT; + resource_size_t size = vma->vm_end - vma->vm_start; + struct resource *rp; + + pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n", + pci_domain_nr(bus), bus->number, + mmap_state == pci_mmap_mem ? "MEM" : "IO", + (unsigned long long)offset, + (unsigned long long)(offset + size - 1)); + + if (mmap_state == pci_mmap_mem) { + /* Hack alert ! + * + * Because X is lame and can fail starting if it gets an error + * trying to mmap legacy_mem (instead of just moving on without + * legacy memory access) we fake it here by giving it anonymous + * memory, effectively behaving just like /dev/zero + */ + if ((offset + size) > hose->isa_mem_size) { + printk(KERN_DEBUG + "Process %s (pid:%d) mapped non-existing PCI" + "legacy memory for 0%04x:%02x\n", + current->comm, current->pid, pci_domain_nr(bus), + bus->number); + if (vma->vm_flags & VM_SHARED) + return shmem_zero_setup(vma); + return 0; + } + offset += hose->isa_mem_phys; + } else { + unsigned long io_offset = (unsigned long)hose->io_base_virt - \ + _IO_BASE; + unsigned long roffset = offset + io_offset; + rp = &hose->io_resource; + if (!(rp->flags & IORESOURCE_IO)) + return -ENXIO; + if (roffset < rp->start || (roffset + size) > rp->end) + return -ENXIO; + offset += hose->io_base_phys; + } + pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset); + + vma->vm_pgoff = offset >> PAGE_SHIFT; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + +void pci_resource_to_user(const struct pci_dev *dev, int bar, + const struct resource *rsrc, + resource_size_t *start, resource_size_t *end) +{ + struct pci_controller *hose = pci_bus_to_host(dev->bus); + resource_size_t offset = 0; + + if (hose == NULL) + return; + + if (rsrc->flags & IORESOURCE_IO) + offset = (unsigned long)hose->io_base_virt - _IO_BASE; + + /* We pass a fully fixed up address to userland for MMIO instead of + * a BAR value because X is lame and expects to be able to use that + * to pass to /dev/mem ! + * + * That means that we'll have potentially 64 bits values where some + * userland apps only expect 32 (like X itself since it thinks only + * Sparc has 64 bits MMIO) but if we don't do that, we break it on + * 32 bits CHRPs :-( + * + * Hopefully, the sysfs insterface is immune to that gunk. Once X + * has been fixed (and the fix spread enough), we can re-enable the + * 2 lines below and pass down a BAR value to userland. In that case + * we'll also have to re-enable the matching code in + * __pci_mmap_make_offset(). + * + * BenH. 
+ */ +#if 0 + else if (rsrc->flags & IORESOURCE_MEM) + offset = hose->pci_mem_offset; +#endif + + *start = rsrc->start - offset; + *end = rsrc->end - offset; +} + +/** + * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree + * @hose: newly allocated pci_controller to be setup + * @dev: device node of the host bridge + * @primary: set if primary bus (32 bits only, soon to be deprecated) + * + * This function will parse the "ranges" property of a PCI host bridge device + * node and setup the resource mapping of a pci controller based on its + * content. + * + * Life would be boring if it wasn't for a few issues that we have to deal + * with here: + * + * - We can only cope with one IO space range and up to 3 Memory space + * ranges. However, some machines (thanks Apple !) tend to split their + * space into lots of small contiguous ranges. So we have to coalesce. + * + * - We can only cope with all memory ranges having the same offset + * between CPU addresses and PCI addresses. Unfortunately, some bridges + * are setup for a large 1:1 mapping along with a small "window" which + * maps PCI address 0 to some arbitrary high address of the CPU space in + * order to give access to the ISA memory hole. + * The way out of here that I've chosen for now is to always set the + * offset based on the first resource found, then override it if we + * have a different offset and the previous was set by an ISA hole. + * + * - Some busses have IO space not starting at 0, which causes trouble with + * the way we do our IO resource renumbering. The code somewhat deals with + * it for 64 bits but I would expect problems on 32 bits. + * + * - Some 32 bits platforms such as 4xx can have physical space larger than + * 32 bits so we need to use 64 bits values for the parsing + */ +void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, + struct device_node *dev, + int primary) +{ + const u32 *ranges; + int rlen; + int pna = of_n_addr_cells(dev); + int np = pna + 5; + int memno = 0, isa_hole = -1; + u32 pci_space; + unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size; + unsigned long long isa_mb = 0; + struct resource *res; + + printk(KERN_INFO "PCI host bridge %s %s ranges:\n", + dev->full_name, primary ? 
"(primary)" : ""); + + /* Get ranges property */ + ranges = of_get_property(dev, "ranges", &rlen); + if (ranges == NULL) + return; + + /* Parse it */ + pr_debug("Parsing ranges property...\n"); + while ((rlen -= np * 4) >= 0) { + /* Read next ranges element */ + pci_space = ranges[0]; + pci_addr = of_read_number(ranges + 1, 2); + cpu_addr = of_translate_address(dev, ranges + 3); + size = of_read_number(ranges + pna + 3, 2); + + pr_debug("pci_space: 0x%08x pci_addr:0x%016llx " + "cpu_addr:0x%016llx size:0x%016llx\n", + pci_space, pci_addr, cpu_addr, size); + + ranges += np; + + /* If we failed translation or got a zero-sized region + * (some FW try to feed us with non sensical zero sized regions + * such as power3 which look like some kind of attempt + * at exposing the VGA memory hole) + */ + if (cpu_addr == OF_BAD_ADDR || size == 0) + continue; + + /* Now consume following elements while they are contiguous */ + for (; rlen >= np * sizeof(u32); + ranges += np, rlen -= np * 4) { + if (ranges[0] != pci_space) + break; + pci_next = of_read_number(ranges + 1, 2); + cpu_next = of_translate_address(dev, ranges + 3); + if (pci_next != pci_addr + size || + cpu_next != cpu_addr + size) + break; + size += of_read_number(ranges + pna + 3, 2); + } + + /* Act based on address space type */ + res = NULL; + switch ((pci_space >> 24) & 0x3) { + case 1: /* PCI IO space */ + printk(KERN_INFO + " IO 0x%016llx..0x%016llx -> 0x%016llx\n", + cpu_addr, cpu_addr + size - 1, pci_addr); + + /* We support only one IO range */ + if (hose->pci_io_size) { + printk(KERN_INFO + " \\--> Skipped (too many) !\n"); + continue; + } + /* On 32 bits, limit I/O space to 16MB */ + if (size > 0x01000000) + size = 0x01000000; + + /* 32 bits needs to map IOs here */ + hose->io_base_virt = ioremap(cpu_addr, size); + + /* Expect trouble if pci_addr is not 0 */ + if (primary) + isa_io_base = + (unsigned long)hose->io_base_virt; + /* pci_io_size and io_base_phys always represent IO + * space starting at 0 so we factor in pci_addr + */ + hose->pci_io_size = pci_addr + size; + hose->io_base_phys = cpu_addr - pci_addr; + + /* Build resource */ + res = &hose->io_resource; + res->flags = IORESOURCE_IO; + res->start = pci_addr; + break; + case 2: /* PCI Memory space */ + case 3: /* PCI 64 bits Memory space */ + printk(KERN_INFO + " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n", + cpu_addr, cpu_addr + size - 1, pci_addr, + (pci_space & 0x40000000) ? "Prefetch" : ""); + + /* We support only 3 memory ranges */ + if (memno >= 3) { + printk(KERN_INFO + " \\--> Skipped (too many) !\n"); + continue; + } + /* Handles ISA memory hole space here */ + if (pci_addr == 0) { + isa_mb = cpu_addr; + isa_hole = memno; + if (primary || isa_mem_base == 0) + isa_mem_base = cpu_addr; + hose->isa_mem_phys = cpu_addr; + hose->isa_mem_size = size; + } + + /* We get the PCI/Mem offset from the first range or + * the, current one if the offset came from an ISA + * hole. If they don't match, bugger. 
+ */ + if (memno == 0 || + (isa_hole >= 0 && pci_addr != 0 && + hose->pci_mem_offset == isa_mb)) + hose->pci_mem_offset = cpu_addr - pci_addr; + else if (pci_addr != 0 && + hose->pci_mem_offset != cpu_addr - pci_addr) { + printk(KERN_INFO + " \\--> Skipped (offset mismatch) !\n"); + continue; + } + + /* Build resource */ + res = &hose->mem_resources[memno++]; + res->flags = IORESOURCE_MEM; + if (pci_space & 0x40000000) + res->flags |= IORESOURCE_PREFETCH; + res->start = cpu_addr; + break; + } + if (res != NULL) { + res->name = dev->full_name; + res->end = res->start + size - 1; + res->parent = NULL; + res->sibling = NULL; + res->child = NULL; + } + } + + /* If there's an ISA hole and the pci_mem_offset is -not- matching + * the ISA hole offset, then we need to remove the ISA hole from + * the resource list for that brige + */ + if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) { + unsigned int next = isa_hole + 1; + printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb); + if (next < memno) + memmove(&hose->mem_resources[isa_hole], + &hose->mem_resources[next], + sizeof(struct resource) * (memno - next)); + hose->mem_resources[--memno].flags = 0; + } +} + +/* Decide whether to display the domain number in /proc */ +int pci_proc_domain(struct pci_bus *bus) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + + if (!(pci_flags & PCI_ENABLE_PROC_DOMAINS)) + return 0; + if (pci_flags & PCI_COMPAT_DOMAIN_0) + return hose->global_number != 0; + return 1; +} + +void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, + struct resource *res) +{ + resource_size_t offset = 0, mask = (resource_size_t)-1; + struct pci_controller *hose = pci_bus_to_host(dev->bus); + + if (!hose) + return; + if (res->flags & IORESOURCE_IO) { + offset = (unsigned long)hose->io_base_virt - _IO_BASE; + mask = 0xffffffffu; + } else if (res->flags & IORESOURCE_MEM) + offset = hose->pci_mem_offset; + + region->start = (res->start - offset) & mask; + region->end = (res->end - offset) & mask; +} +EXPORT_SYMBOL(pcibios_resource_to_bus); + +void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, + struct pci_bus_region *region) +{ + resource_size_t offset = 0, mask = (resource_size_t)-1; + struct pci_controller *hose = pci_bus_to_host(dev->bus); + + if (!hose) + return; + if (res->flags & IORESOURCE_IO) { + offset = (unsigned long)hose->io_base_virt - _IO_BASE; + mask = 0xffffffffu; + } else if (res->flags & IORESOURCE_MEM) + offset = hose->pci_mem_offset; + res->start = (region->start + offset) & mask; + res->end = (region->end + offset) & mask; +} +EXPORT_SYMBOL(pcibios_bus_to_resource); + +/* Fixup a bus resource into a linux resource */ +static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev) +{ + struct pci_controller *hose = pci_bus_to_host(dev->bus); + resource_size_t offset = 0, mask = (resource_size_t)-1; + + if (res->flags & IORESOURCE_IO) { + offset = (unsigned long)hose->io_base_virt - _IO_BASE; + mask = 0xffffffffu; + } else if (res->flags & IORESOURCE_MEM) + offset = hose->pci_mem_offset; + + res->start = (res->start + offset) & mask; + res->end = (res->end + offset) & mask; +} + +/* This header fixup will do the resource fixup for all devices as they are + * probed, but not for bridge ranges + */ +static void __devinit pcibios_fixup_resources(struct pci_dev *dev) +{ + struct pci_controller *hose = pci_bus_to_host(dev->bus); + int i; + + if (!hose) { + printk(KERN_ERR "No host bridge for PCI dev %s !\n", + pci_name(dev)); + return; + } + 
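+	/* Walk every BAR of the device: a BAR still reading zero is flagged
+	 * IORESOURCE_UNSET for later reassignment (unless PCI_PROBE_ONLY is
+	 * set); everything else is translated from a bus address to a CPU
+	 * physical address by fixup_resource().
+	 */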
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + struct resource *res = dev->resource + i; + if (!res->flags) + continue; + /* On platforms that have PCI_PROBE_ONLY set, we don't + * consider 0 as an unassigned BAR value. It's technically + * a valid value, but linux doesn't like it... so when we can + * re-assign things, we do so, but if we can't, we keep it + * around and hope for the best... + */ + if (res->start == 0 && !(pci_flags & PCI_PROBE_ONLY)) { + pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]" \ + "is unassigned\n", + pci_name(dev), i, + (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned int)res->flags); + res->end -= res->start; + res->start = 0; + res->flags |= IORESOURCE_UNSET; + continue; + } + + pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] fixup...\n", + pci_name(dev), i, + (unsigned long long)res->start,\ + (unsigned long long)res->end, + (unsigned int)res->flags); + + fixup_resource(res, dev); + + pr_debug("PCI:%s %016llx-%016llx\n", + pci_name(dev), + (unsigned long long)res->start, + (unsigned long long)res->end); + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources); + +/* This function tries to figure out if a bridge resource has been initialized + * by the firmware or not. It doesn't have to be absolutely bullet proof, but + * things go more smoothly when it gets it right. It should covers cases such + * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges + */ +static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus, + struct resource *res) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + struct pci_dev *dev = bus->self; + resource_size_t offset; + u16 command; + int i; + + /* We don't do anything if PCI_PROBE_ONLY is set */ + if (pci_flags & PCI_PROBE_ONLY) + return 0; + + /* Job is a bit different between memory and IO */ + if (res->flags & IORESOURCE_MEM) { + /* If the BAR is non-0 (res != pci_mem_offset) then it's + * probably been initialized by somebody + */ + if (res->start != hose->pci_mem_offset) + return 0; + + /* The BAR is 0, let's check if memory decoding is enabled on + * the bridge. If not, we consider it unassigned + */ + pci_read_config_word(dev, PCI_COMMAND, &command); + if ((command & PCI_COMMAND_MEMORY) == 0) + return 1; + + /* Memory decoding is enabled and the BAR is 0. If any of + * the bridge resources covers that starting address (0 then + * it's good enough for us for memory + */ + for (i = 0; i < 3; i++) { + if ((hose->mem_resources[i].flags & IORESOURCE_MEM) && + hose->mem_resources[i].start == hose->pci_mem_offset) + return 0; + } + + /* Well, it starts at 0 and we know it will collide so we may as + * well consider it as unassigned. That covers the Apple case. + */ + return 1; + } else { + /* If the BAR is non-0, then we consider it assigned */ + offset = (unsigned long)hose->io_base_virt - _IO_BASE; + if (((res->start - offset) & 0xfffffffful) != 0) + return 0; + + /* Here, we are a bit different than memory as typically IO + * space starting at low addresses -is- valid. What we do + * instead if that we consider as unassigned anything that + * doesn't have IO enabled in the PCI command register, + * and that's it. 
+ */ + pci_read_config_word(dev, PCI_COMMAND, &command); + if (command & PCI_COMMAND_IO) + return 0; + + /* It's starting at 0 and IO is disabled in the bridge, consider + * it unassigned + */ + return 1; + } +} + +/* Fixup resources of a PCI<->PCI bridge */ +static void __devinit pcibios_fixup_bridge(struct pci_bus *bus) +{ + struct resource *res; + int i; + + struct pci_dev *dev = bus->self; + + for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { + res = bus->resource[i]; + if (!res) + continue; + if (!res->flags) + continue; + if (i >= 3 && bus->self->transparent) + continue; + + pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n", + pci_name(dev), i, + (unsigned long long)res->start,\ + (unsigned long long)res->end, + (unsigned int)res->flags); + + /* Perform fixup */ + fixup_resource(res, dev); + + /* Try to detect uninitialized P2P bridge resources, + * and clear them out so they get re-assigned later + */ + if (pcibios_uninitialized_bridge_resource(bus, res)) { + res->flags = 0; + pr_debug("PCI:%s (unassigned)\n", + pci_name(dev)); + } else { + pr_debug("PCI:%s %016llx-%016llx\n", + pci_name(dev), + (unsigned long long)res->start, + (unsigned long long)res->end); + } + } +} + +void __devinit pcibios_setup_bus_self(struct pci_bus *bus) +{ + /* Fix up the bus resources for P2P bridges */ + if (bus->self != NULL) + pcibios_fixup_bridge(bus); +} + +void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) +{ + struct pci_dev *dev; + + pr_debug("PCI: Fixup bus devices %d (%s)\n", + bus->number, bus->self ? pci_name(bus->self) : "PHB"); + + list_for_each_entry(dev, &bus->devices, bus_list) { + struct dev_archdata *sd = &dev->dev.archdata; + + /* Setup OF node pointer in archdata */ + sd->of_node = pci_device_to_OF_node(dev); + + /* Fixup NUMA node as it may not be setup yet by the generic + * code and is needed by the DMA init + */ + set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); + + /* Hook up default DMA ops */ + sd->dma_ops = pci_dma_ops; + sd->dma_data = (void *)PCI_DRAM_OFFSET; + + /* Read default IRQs and fixup if necessary */ + pci_read_irq_line(dev); + } +} + +void __devinit pcibios_fixup_bus(struct pci_bus *bus) +{ + /* When called from the generic PCI probe, read PCI<->PCI bridge + * bases. This is -not- called when generating the PCI tree from + * the OF device-tree. + */ + if (bus->self != NULL) + pci_read_bridge_bases(bus); + + /* Now fixup the bus bus */ + pcibios_setup_bus_self(bus); + + /* Now fixup devices on that bus */ + pcibios_setup_bus_devices(bus); +} +EXPORT_SYMBOL(pcibios_fixup_bus); + +static int skip_isa_ioresource_align(struct pci_dev *dev) +{ + if ((pci_flags & PCI_CAN_SKIP_ISA_ALIGN) && + !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA)) + return 1; + return 0; +} + +/* + * We need to avoid collisions with `mirrored' VGA ports + * and other strange ISA hardware, so we always want the + * addresses to be allocated in the 0x000-0x0ff region + * modulo 0x400. + * + * Why? Because some silly external IO cards only decode + * the low 10 bits of the IO address. The 0x00-0xff region + * is reserved for motherboard devices that decode all 16 + * bits, so it's ok to allocate at, say, 0x2800-0x28ff, + * but we want to try to avoid allocating at 0x2900-0x2bff + * which might have be mirrored at 0x0100-0x03ff.. 
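+ *
+ * For example, an I/O request that would start at 0x2910 is rounded up
+ * by pcibios_align_resource() below to the next 0x400 boundary, 0x2c00.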
+ */ +void pcibios_align_resource(void *data, struct resource *res, + resource_size_t size, resource_size_t align) +{ + struct pci_dev *dev = data; + + if (res->flags & IORESOURCE_IO) { + resource_size_t start = res->start; + + if (skip_isa_ioresource_align(dev)) + return; + if (start & 0x300) { + start = (start + 0x3ff) & ~0x3ff; + res->start = start; + } + } +} +EXPORT_SYMBOL(pcibios_align_resource); + +/* + * Reparent resource children of pr that conflict with res + * under res, and make res replace those children. + */ +static int __init reparent_resources(struct resource *parent, + struct resource *res) +{ + struct resource *p, **pp; + struct resource **firstpp = NULL; + + for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) { + if (p->end < res->start) + continue; + if (res->end < p->start) + break; + if (p->start < res->start || p->end > res->end) + return -1; /* not completely contained */ + if (firstpp == NULL) + firstpp = pp; + } + if (firstpp == NULL) + return -1; /* didn't find any conflicting entries? */ + res->parent = parent; + res->child = *firstpp; + res->sibling = *pp; + *firstpp = res; + *pp = NULL; + for (p = res->child; p != NULL; p = p->sibling) { + p->parent = res; + pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n", + p->name, + (unsigned long long)p->start, + (unsigned long long)p->end, res->name); + } + return 0; +} + +/* + * Handle resources of PCI devices. If the world were perfect, we could + * just allocate all the resource regions and do nothing more. It isn't. + * On the other hand, we cannot just re-allocate all devices, as it would + * require us to know lots of host bridge internals. So we attempt to + * keep as much of the original configuration as possible, but tweak it + * when it's found to be wrong. + * + * Known BIOS problems we have to work around: + * - I/O or memory regions not configured + * - regions configured, but not enabled in the command register + * - bogus I/O addresses above 64K used + * - expansion ROMs left enabled (this may sound harmless, but given + * the fact the PCI specs explicitly allow address decoders to be + * shared between expansion ROMs and other resource regions, it's + * at least dangerous) + * + * Our solution: + * (1) Allocate resources for all buses behind PCI-to-PCI bridges. + * This gives us fixed barriers on where we can allocate. + * (2) Allocate resources for all enabled devices. If there is + * a collision, just mark the resource as unallocated. Also + * disable expansion ROMs during this step. + * (3) Try to allocate resources for disabled devices. If the + * resources were assigned correctly, everything goes well, + * if they weren't, they won't disturb allocation of other + * resources. + * (4) Assign new addresses to resources which were either + * not configured at all or misconfigured. If explicitly + * requested by the user, configure expansion ROM address + * as well. + */ + +void pcibios_allocate_bus_resources(struct pci_bus *bus) +{ + struct pci_bus *b; + int i; + struct resource *res, *pr; + + pr_debug("PCI: Allocating bus resources for %04x:%02x...\n", + pci_domain_nr(bus), bus->number); + + for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) { + res = bus->resource[i]; + if (!res || !res->flags + || res->start > res->end || res->parent) + continue; + if (bus->parent == NULL) + pr = (res->flags & IORESOURCE_IO) ? + &ioport_resource : &iomem_resource; + else { + /* Don't bother with non-root busses when + * re-assigning all resources. 
We clear the + * resource flags as if they were colliding + * and as such ensure proper re-allocation + * later. + */ + if (pci_flags & PCI_REASSIGN_ALL_RSRC) + goto clear_resource; + pr = pci_find_parent_resource(bus->self, res); + if (pr == res) { + /* this happens when the generic PCI + * code (wrongly) decides that this + * bridge is transparent -- paulus + */ + continue; + } + } + + pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx " + "[0x%x], parent %p (%s)\n", + bus->self ? pci_name(bus->self) : "PHB", + bus->number, i, + (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned int)res->flags, + pr, (pr && pr->name) ? pr->name : "nil"); + + if (pr && !(pr->flags & IORESOURCE_UNSET)) { + if (request_resource(pr, res) == 0) + continue; + /* + * Must be a conflict with an existing entry. + * Move that entry (or entries) under the + * bridge resource and try again. + */ + if (reparent_resources(pr, res) == 0) + continue; + } + printk(KERN_WARNING "PCI: Cannot allocate resource region " + "%d of PCI bridge %d, will remap\n", i, bus->number); +clear_resource: + res->flags = 0; + } + + list_for_each_entry(b, &bus->children, node) + pcibios_allocate_bus_resources(b); +} + +static inline void __devinit alloc_resource(struct pci_dev *dev, int idx) +{ + struct resource *pr, *r = &dev->resource[idx]; + + pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n", + pci_name(dev), idx, + (unsigned long long)r->start, + (unsigned long long)r->end, + (unsigned int)r->flags); + + pr = pci_find_parent_resource(dev, r); + if (!pr || (pr->flags & IORESOURCE_UNSET) || + request_resource(pr, r) < 0) { + printk(KERN_WARNING "PCI: Cannot allocate resource region %d" + " of device %s, will remap\n", idx, pci_name(dev)); + if (pr) + pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n", + pr, + (unsigned long long)pr->start, + (unsigned long long)pr->end, + (unsigned int)pr->flags); + /* We'll assign a new address later */ + r->flags |= IORESOURCE_UNSET; + r->end -= r->start; + r->start = 0; + } +} + +static void __init pcibios_allocate_resources(int pass) +{ + struct pci_dev *dev = NULL; + int idx, disabled; + u16 command; + struct resource *r; + + for_each_pci_dev(dev) { + pci_read_config_word(dev, PCI_COMMAND, &command); + for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { + r = &dev->resource[idx]; + if (r->parent) /* Already allocated */ + continue; + if (!r->flags || (r->flags & IORESOURCE_UNSET)) + continue; /* Not assigned at all */ + /* We only allocate ROMs on pass 1 just in case they + * have been screwed up by firmware + */ + if (idx == PCI_ROM_RESOURCE) + disabled = 1; + if (r->flags & IORESOURCE_IO) + disabled = !(command & PCI_COMMAND_IO); + else + disabled = !(command & PCI_COMMAND_MEMORY); + if (pass == disabled) + alloc_resource(dev, idx); + } + if (pass) + continue; + r = &dev->resource[PCI_ROM_RESOURCE]; + if (r->flags) { + /* Turn the ROM off, leave the resource region, + * but keep it unregistered. 
+ */ + u32 reg; + pci_read_config_dword(dev, dev->rom_base_reg, ®); + if (reg & PCI_ROM_ADDRESS_ENABLE) { + pr_debug("PCI: Switching off ROM of %s\n", + pci_name(dev)); + r->flags &= ~IORESOURCE_ROM_ENABLE; + pci_write_config_dword(dev, dev->rom_base_reg, + reg & ~PCI_ROM_ADDRESS_ENABLE); + } + } + } +} + +static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + resource_size_t offset; + struct resource *res, *pres; + int i; + + pr_debug("Reserving legacy ranges for domain %04x\n", + pci_domain_nr(bus)); + + /* Check for IO */ + if (!(hose->io_resource.flags & IORESOURCE_IO)) + goto no_io; + offset = (unsigned long)hose->io_base_virt - _IO_BASE; + res = kzalloc(sizeof(struct resource), GFP_KERNEL); + BUG_ON(res == NULL); + res->name = "Legacy IO"; + res->flags = IORESOURCE_IO; + res->start = offset; + res->end = (offset + 0xfff) & 0xfffffffful; + pr_debug("Candidate legacy IO: %pR\n", res); + if (request_resource(&hose->io_resource, res)) { + printk(KERN_DEBUG + "PCI %04x:%02x Cannot reserve Legacy IO %pR\n", + pci_domain_nr(bus), bus->number, res); + kfree(res); + } + + no_io: + /* Check for memory */ + offset = hose->pci_mem_offset; + pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset); + for (i = 0; i < 3; i++) { + pres = &hose->mem_resources[i]; + if (!(pres->flags & IORESOURCE_MEM)) + continue; + pr_debug("hose mem res: %pR\n", pres); + if ((pres->start - offset) <= 0xa0000 && + (pres->end - offset) >= 0xbffff) + break; + } + if (i >= 3) + return; + res = kzalloc(sizeof(struct resource), GFP_KERNEL); + BUG_ON(res == NULL); + res->name = "Legacy VGA memory"; + res->flags = IORESOURCE_MEM; + res->start = 0xa0000 + offset; + res->end = 0xbffff + offset; + pr_debug("Candidate VGA memory: %pR\n", res); + if (request_resource(pres, res)) { + printk(KERN_DEBUG + "PCI %04x:%02x Cannot reserve VGA memory %pR\n", + pci_domain_nr(bus), bus->number, res); + kfree(res); + } +} + +void __init pcibios_resource_survey(void) +{ + struct pci_bus *b; + + /* Allocate and assign resources. If we re-assign everything, then + * we skip the allocate phase + */ + list_for_each_entry(b, &pci_root_buses, node) + pcibios_allocate_bus_resources(b); + + if (!(pci_flags & PCI_REASSIGN_ALL_RSRC)) { + pcibios_allocate_resources(0); + pcibios_allocate_resources(1); + } + + /* Before we start assigning unassigned resource, we try to reserve + * the low IO area and the VGA memory area if they intersect the + * bus available resources to avoid allocating things on top of them + */ + if (!(pci_flags & PCI_PROBE_ONLY)) { + list_for_each_entry(b, &pci_root_buses, node) + pcibios_reserve_legacy_regions(b); + } + + /* Now, if the platform didn't decide to blindly trust the firmware, + * we proceed to assigning things that were left unassigned + */ + if (!(pci_flags & PCI_PROBE_ONLY)) { + pr_debug("PCI: Assigning unassigned resources...\n"); + pci_assign_unassigned_resources(); + } +} + +#ifdef CONFIG_HOTPLUG + +/* This is used by the PCI hotplug driver to allocate resource + * of newly plugged busses. We can try to consolidate with the + * rest of the code later, for now, keep it as-is as our main + * resource allocation function doesn't deal with sub-trees yet. 
+ */ +void __devinit pcibios_claim_one_bus(struct pci_bus *bus) +{ + struct pci_dev *dev; + struct pci_bus *child_bus; + + list_for_each_entry(dev, &bus->devices, bus_list) { + int i; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *r = &dev->resource[i]; + + if (r->parent || !r->start || !r->flags) + continue; + + pr_debug("PCI: Claiming %s: " + "Resource %d: %016llx..%016llx [%x]\n", + pci_name(dev), i, + (unsigned long long)r->start, + (unsigned long long)r->end, + (unsigned int)r->flags); + + pci_claim_resource(dev, i); + } + } + + list_for_each_entry(child_bus, &bus->children, node) + pcibios_claim_one_bus(child_bus); +} +EXPORT_SYMBOL_GPL(pcibios_claim_one_bus); + + +/* pcibios_finish_adding_to_bus + * + * This is to be called by the hotplug code after devices have been + * added to a bus, this include calling it for a PHB that is just + * being added + */ +void pcibios_finish_adding_to_bus(struct pci_bus *bus) +{ + pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n", + pci_domain_nr(bus), bus->number); + + /* Allocate bus and devices resources */ + pcibios_allocate_bus_resources(bus); + pcibios_claim_one_bus(bus); + + /* Add new devices to global lists. Register in proc, sysfs. */ + pci_bus_add_devices(bus); + + /* Fixup EEH */ + eeh_add_device_tree_late(bus); +} +EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus); + +#endif /* CONFIG_HOTPLUG */ + +int pcibios_enable_device(struct pci_dev *dev, int mask) +{ + return pci_enable_resources(dev, mask); +} + +void __devinit pcibios_setup_phb_resources(struct pci_controller *hose) +{ + struct pci_bus *bus = hose->bus; + struct resource *res; + int i; + + /* Hookup PHB IO resource */ + bus->resource[0] = res = &hose->io_resource; + + if (!res->flags) { + printk(KERN_WARNING "PCI: I/O resource not set for host" + " bridge %s (domain %d)\n", + hose->dn->full_name, hose->global_number); + /* Workaround for lack of IO resource only on 32-bit */ + res->start = (unsigned long)hose->io_base_virt - isa_io_base; + res->end = res->start + IO_SPACE_LIMIT; + res->flags = IORESOURCE_IO; + } + + pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n", + (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned long)res->flags); + + /* Hookup PHB Memory resources */ + for (i = 0; i < 3; ++i) { + res = &hose->mem_resources[i]; + if (!res->flags) { + if (i > 0) + continue; + printk(KERN_ERR "PCI: Memory resource 0 not set for " + "host bridge %s (domain %d)\n", + hose->dn->full_name, hose->global_number); + + /* Workaround for lack of MEM resource only on 32-bit */ + res->start = hose->pci_mem_offset; + res->end = (resource_size_t)-1LL; + res->flags = IORESOURCE_MEM; + + } + bus->resource[i+1] = res; + + pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", + i, (unsigned long long)res->start, + (unsigned long long)res->end, + (unsigned long)res->flags); + } + + pr_debug("PCI: PHB MEM offset = %016llx\n", + (unsigned long long)hose->pci_mem_offset); + pr_debug("PCI: PHB IO offset = %08lx\n", + (unsigned long)hose->io_base_virt - _IO_BASE); +} + +/* + * Null PCI config access functions, for the case when we can't + * find a hose. 
+ */ +#define NULL_PCI_OP(rw, size, type) \ +static int \ +null_##rw##_config_##size(struct pci_dev *dev, int offset, type val) \ +{ \ + return PCIBIOS_DEVICE_NOT_FOUND; \ +} + +static int +null_read_config(struct pci_bus *bus, unsigned int devfn, int offset, + int len, u32 *val) +{ + return PCIBIOS_DEVICE_NOT_FOUND; +} + +static int +null_write_config(struct pci_bus *bus, unsigned int devfn, int offset, + int len, u32 val) +{ + return PCIBIOS_DEVICE_NOT_FOUND; +} + +static struct pci_ops null_pci_ops = { + .read = null_read_config, + .write = null_write_config, +}; + +/* + * These functions are used early on before PCI scanning is done + * and all of the pci_dev and pci_bus structures have been created. + */ +static struct pci_bus * +fake_pci_bus(struct pci_controller *hose, int busnr) +{ + static struct pci_bus bus; + + if (!hose) + printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr); + + bus.number = busnr; + bus.sysdata = hose; + bus.ops = hose ? hose->ops : &null_pci_ops; + return &bus; +} + +#define EARLY_PCI_OP(rw, size, type) \ +int early_##rw##_config_##size(struct pci_controller *hose, int bus, \ + int devfn, int offset, type value) \ +{ \ + return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus), \ + devfn, offset, value); \ +} + +EARLY_PCI_OP(read, byte, u8 *) +EARLY_PCI_OP(read, word, u16 *) +EARLY_PCI_OP(read, dword, u32 *) +EARLY_PCI_OP(write, byte, u8) +EARLY_PCI_OP(write, word, u16) +EARLY_PCI_OP(write, dword, u32) + +int early_find_capability(struct pci_controller *hose, int bus, int devfn, + int cap) +{ + return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap); +} diff --git a/arch/microblaze/pci/pci_32.c b/arch/microblaze/pci/pci_32.c new file mode 100644 index 000000000000..7e0c94f501cc --- /dev/null +++ b/arch/microblaze/pci/pci_32.c @@ -0,0 +1,430 @@ +/* + * Common pmac/prep/chrp pci routines. -- Cort + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#undef DEBUG + +unsigned long isa_io_base; +unsigned long pci_dram_offset; +int pcibios_assign_bus_offset = 1; + +static u8 *pci_to_OF_bus_map; + +/* By default, we don't re-assign bus numbers. We do this only on + * some pmacs + */ +static int pci_assign_all_buses; + +static int pci_bus_count; + +/* + * Functions below are used on OpenFirmware machines. 
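+ * Several of them build or consult pci_to_OF_bus_map, which records, for
+ * each Linux bus number, the bus number used in the firmware device tree.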
+ */ +static void +make_one_node_map(struct device_node *node, u8 pci_bus) +{ + const int *bus_range; + int len; + + if (pci_bus >= pci_bus_count) + return; + bus_range = of_get_property(node, "bus-range", &len); + if (bus_range == NULL || len < 2 * sizeof(int)) { + printk(KERN_WARNING "Can't get bus-range for %s, " + "assuming it starts at 0\n", node->full_name); + pci_to_OF_bus_map[pci_bus] = 0; + } else + pci_to_OF_bus_map[pci_bus] = bus_range[0]; + + for_each_child_of_node(node, node) { + struct pci_dev *dev; + const unsigned int *class_code, *reg; + + class_code = of_get_property(node, "class-code", NULL); + if (!class_code || + ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && + (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) + continue; + reg = of_get_property(node, "reg", NULL); + if (!reg) + continue; + dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff)); + if (!dev || !dev->subordinate) { + pci_dev_put(dev); + continue; + } + make_one_node_map(node, dev->subordinate->number); + pci_dev_put(dev); + } +} + +void +pcibios_make_OF_bus_map(void) +{ + int i; + struct pci_controller *hose, *tmp; + struct property *map_prop; + struct device_node *dn; + + pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL); + if (!pci_to_OF_bus_map) { + printk(KERN_ERR "Can't allocate OF bus map !\n"); + return; + } + + /* We fill the bus map with invalid values, that helps + * debugging. + */ + for (i = 0; i < pci_bus_count; i++) + pci_to_OF_bus_map[i] = 0xff; + + /* For each hose, we begin searching bridges */ + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { + struct device_node *node = hose->dn; + + if (!node) + continue; + make_one_node_map(node, hose->first_busno); + } + dn = of_find_node_by_path("/"); + map_prop = of_find_property(dn, "pci-OF-bus-map", NULL); + if (map_prop) { + BUG_ON(pci_bus_count > map_prop->length); + memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count); + } + of_node_put(dn); +#ifdef DEBUG + printk(KERN_INFO "PCI->OF bus map:\n"); + for (i = 0; i < pci_bus_count; i++) { + if (pci_to_OF_bus_map[i] == 0xff) + continue; + printk(KERN_INFO "%d -> %d\n", i, pci_to_OF_bus_map[i]); + } +#endif +} + +typedef int (*pci_OF_scan_iterator)(struct device_node *node, void *data); + +static struct device_node *scan_OF_pci_childs(struct device_node *parent, + pci_OF_scan_iterator filter, void *data) +{ + struct device_node *node; + struct device_node *sub_node; + + for_each_child_of_node(parent, node) { + const unsigned int *class_code; + + if (filter(node, data)) { + of_node_put(node); + return node; + } + + /* For PCI<->PCI bridges or CardBus bridges, we go down + * Note: some OFs create a parent node "multifunc-device" as + * a fake root for all functions of a multi-function device, + * we go down them as well. 
+ */ + class_code = of_get_property(node, "class-code", NULL); + if ((!class_code || + ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && + (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) && + strcmp(node->name, "multifunc-device")) + continue; + sub_node = scan_OF_pci_childs(node, filter, data); + if (sub_node) { + of_node_put(node); + return sub_node; + } + } + return NULL; +} + +static struct device_node *scan_OF_for_pci_dev(struct device_node *parent, + unsigned int devfn) +{ + struct device_node *np, *cnp; + const u32 *reg; + unsigned int psize; + + for_each_child_of_node(parent, np) { + reg = of_get_property(np, "reg", &psize); + if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn) + return np; + + /* Note: some OFs create a parent node "multifunc-device" as + * a fake root for all functions of a multi-function device, + * we go down them as well. */ + if (!strcmp(np->name, "multifunc-device")) { + cnp = scan_OF_for_pci_dev(np, devfn); + if (cnp) + return cnp; + } + } + return NULL; +} + + +static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus) +{ + struct device_node *parent, *np; + + /* Are we a root bus ? */ + if (bus->self == NULL || bus->parent == NULL) { + struct pci_controller *hose = pci_bus_to_host(bus); + if (hose == NULL) + return NULL; + return of_node_get(hose->dn); + } + + /* not a root bus, we need to get our parent */ + parent = scan_OF_for_pci_bus(bus->parent); + if (parent == NULL) + return NULL; + + /* now iterate for children for a match */ + np = scan_OF_for_pci_dev(parent, bus->self->devfn); + of_node_put(parent); + + return np; +} + +/* + * Scans the OF tree for a device node matching a PCI device + */ +struct device_node * +pci_busdev_to_OF_node(struct pci_bus *bus, int devfn) +{ + struct device_node *parent, *np; + + pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn); + parent = scan_OF_for_pci_bus(bus); + if (parent == NULL) + return NULL; + pr_debug(" parent is %s\n", parent ? parent->full_name : ""); + np = scan_OF_for_pci_dev(parent, devfn); + of_node_put(parent); + pr_debug(" result is %s\n", np ? np->full_name : ""); + + /* XXX most callers don't release the returned node + * mostly because ppc64 doesn't increase the refcount, + * we need to fix that. + */ + return np; +} +EXPORT_SYMBOL(pci_busdev_to_OF_node); + +struct device_node* +pci_device_to_OF_node(struct pci_dev *dev) +{ + return pci_busdev_to_OF_node(dev->bus, dev->devfn); +} +EXPORT_SYMBOL(pci_device_to_OF_node); + +static int +find_OF_pci_device_filter(struct device_node *node, void *data) +{ + return ((void *)node == data); +} + +/* + * Returns the PCI device matching a given OF node + */ +int +pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn) +{ + const unsigned int *reg; + struct pci_controller *hose; + struct pci_dev *dev = NULL; + + /* Make sure it's really a PCI device */ + hose = pci_find_hose_for_OF_device(node); + if (!hose || !hose->dn) + return -ENODEV; + if (!scan_OF_pci_childs(hose->dn, + find_OF_pci_device_filter, (void *)node)) + return -ENODEV; + reg = of_get_property(node, "reg", NULL); + if (!reg) + return -ENODEV; + *bus = (reg[0] >> 16) & 0xff; + *devfn = ((reg[0] >> 8) & 0xff); + + /* Ok, here we need some tweak. If we have already renumbered + * all busses, we can't rely on the OF bus number any more. + * the pci_to_OF_bus_map is not enough as several PCI busses + * may match the same OF bus number. 
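+	 * So, if a bus map exists, we search the live pci_dev list for a
+	 * device whose mapped OF bus number and devfn match, and return
+	 * its Linux bus number instead.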
+ */ + if (!pci_to_OF_bus_map) + return 0; + + for_each_pci_dev(dev) + if (pci_to_OF_bus_map[dev->bus->number] == *bus && + dev->devfn == *devfn) { + *bus = dev->bus->number; + pci_dev_put(dev); + return 0; + } + + return -ENODEV; +} +EXPORT_SYMBOL(pci_device_from_OF_node); + +/* We create the "pci-OF-bus-map" property now so it appears in the + * /proc device tree + */ +void __init +pci_create_OF_bus_map(void) +{ + struct property *of_prop; + struct device_node *dn; + + of_prop = (struct property *) alloc_bootmem(sizeof(struct property) + \ + 256); + if (!of_prop) + return; + dn = of_find_node_by_path("/"); + if (dn) { + memset(of_prop, -1, sizeof(struct property) + 256); + of_prop->name = "pci-OF-bus-map"; + of_prop->length = 256; + of_prop->value = &of_prop[1]; + prom_add_property(dn, of_prop); + of_node_put(dn); + } +} + +static void __devinit pcibios_scan_phb(struct pci_controller *hose) +{ + struct pci_bus *bus; + struct device_node *node = hose->dn; + unsigned long io_offset; + struct resource *res = &hose->io_resource; + + pr_debug("PCI: Scanning PHB %s\n", + node ? node->full_name : ""); + + /* Create an empty bus for the toplevel */ + bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose); + if (bus == NULL) { + printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", + hose->global_number); + return; + } + bus->secondary = hose->first_busno; + hose->bus = bus; + + /* Fixup IO space offset */ + io_offset = (unsigned long)hose->io_base_virt - isa_io_base; + res->start = (res->start + io_offset) & 0xffffffffu; + res->end = (res->end + io_offset) & 0xffffffffu; + + /* Wire up PHB bus resources */ + pcibios_setup_phb_resources(hose); + + /* Scan children */ + hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); +} + +static int __init pcibios_init(void) +{ + struct pci_controller *hose, *tmp; + int next_busno = 0; + + printk(KERN_INFO "PCI: Probing PCI hardware\n"); + + if (pci_flags & PCI_REASSIGN_ALL_BUS) { + printk(KERN_INFO "setting pci_asign_all_busses\n"); + pci_assign_all_buses = 1; + } + + /* Scan all of the recorded PCI controllers. */ + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { + if (pci_assign_all_buses) + hose->first_busno = next_busno; + hose->last_busno = 0xff; + pcibios_scan_phb(hose); + printk(KERN_INFO "calling pci_bus_add_devices()\n"); + pci_bus_add_devices(hose->bus); + if (pci_assign_all_buses || next_busno <= hose->last_busno) + next_busno = hose->last_busno + \ + pcibios_assign_bus_offset; + } + pci_bus_count = next_busno; + + /* OpenFirmware based machines need a map of OF bus + * numbers vs. kernel bus numbers since we may have to + * remap them. + */ + if (pci_assign_all_buses) + pcibios_make_OF_bus_map(); + + /* Call common code to handle resource allocation */ + pcibios_resource_survey(); + + return 0; +} + +subsys_initcall(pcibios_init); + +static struct pci_controller* +pci_bus_to_hose(int bus) +{ + struct pci_controller *hose, *tmp; + + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) + if (bus >= hose->first_busno && bus <= hose->last_busno) + return hose; + return NULL; +} + +/* Provide information on locations of various I/O regions in physical + * memory. Do this on a per-card basis so that we choose the right + * root bridge. 
+ * Note that the returned IO or memory base is a physical address + */ + +long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn) +{ + struct pci_controller *hose; + long result = -EOPNOTSUPP; + + hose = pci_bus_to_hose(bus); + if (!hose) + return -ENODEV; + + switch (which) { + case IOBASE_BRIDGE_NUMBER: + return (long)hose->first_busno; + case IOBASE_MEMORY: + return (long)hose->pci_mem_offset; + case IOBASE_IO: + return (long)hose->io_base_phys; + case IOBASE_ISA_IO: + return (long)isa_io_base; + case IOBASE_ISA_MEM: + return (long)isa_mem_base; + } + + return result; +} -- cgit v1.2.3 From a6475c132278c1be158a13872c233aeab8a00176 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 18 Jan 2010 15:27:10 +0100 Subject: microblaze: Enable PCI, missing files There are two parts of changes. The first is just enable PCI in Makefiles and in Kconfig. The second is the rest of missing files. I didn't want to add it with previous patch because that patch is too big. Current Microblaze toolchain has problem with weak symbols that's why is necessary to apply this changes to be possible to compile pci support. Xilinx knows about this problem. Signed-off-by: Michal Simek --- arch/microblaze/Kconfig | 15 ++++ arch/microblaze/Makefile | 1 + arch/microblaze/include/asm/io.h | 16 +++- arch/microblaze/include/asm/pgtable.h | 15 ++++ arch/microblaze/include/asm/prom.h | 15 ++++ arch/microblaze/pci/Makefile | 5 ++ arch/microblaze/pci/indirect_pci.c | 163 ++++++++++++++++++++++++++++++++++ arch/microblaze/pci/iomap.c | 39 ++++++++ drivers/pci/Makefile | 1 + 9 files changed, 269 insertions(+), 1 deletion(-) create mode 100644 arch/microblaze/pci/Makefile create mode 100644 arch/microblaze/pci/indirect_pci.c create mode 100644 arch/microblaze/pci/iomap.c diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 71ec04137417..e1fa0844ba4e 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -256,6 +256,21 @@ source "fs/Kconfig.binfmt" endmenu +menu "Bus Options" + +config PCI + bool "PCI support" + +config PCI_DOMAINS + def_bool PCI + +config PCI_SYSCALL + def_bool PCI + +source "drivers/pci/Kconfig" + +endmenu + source "net/Kconfig" source "drivers/Kconfig" diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile index d2d6cfcb1a30..836832dd9b26 100644 --- a/arch/microblaze/Makefile +++ b/arch/microblaze/Makefile @@ -50,6 +50,7 @@ libs-y += $(LIBGCC) core-y += arch/microblaze/kernel/ core-y += arch/microblaze/mm/ core-y += arch/microblaze/platform/ +core-$(CONFIG_PCI) += arch/microblaze/pci/ drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/ diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h index f82df5d221a8..06d804b15a51 100644 --- a/arch/microblaze/include/asm/io.h +++ b/arch/microblaze/include/asm/io.h @@ -17,7 +17,21 @@ #include /* Get struct page {...} */ #include -#define PCI_DRAM_OFFSET 0 +#ifndef CONFIG_PCI +#define _IO_BASE 0 +#define _ISA_MEM_BASE 0 +#define PCI_DRAM_OFFSET 0 +#else +#define _IO_BASE isa_io_base +#define _ISA_MEM_BASE isa_mem_base +#define PCI_DRAM_OFFSET pci_dram_offset +#endif + +extern unsigned long isa_io_base; +extern unsigned long pci_io_base; +extern unsigned long pci_dram_offset; + +extern resource_size_t isa_mem_base; #define IO_SPACE_LIMIT (0xFFFFFFFF) diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index cc3a4dfc3eaa..1c47f6f8bfb6 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -89,6 
+89,21 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } #endif /* __ASSEMBLY__ */ +/* + * Macro to mark a page protection value as "uncacheable". + */ + +#define _PAGE_CACHE_CTL (_PAGE_GUARDED | _PAGE_NO_CACHE | \ + _PAGE_WRITETHRU) + +#define pgprot_noncached(prot) \ + (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_NO_CACHE | _PAGE_GUARDED)) + +#define pgprot_noncached_wc(prot) \ + (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_NO_CACHE)) + /* * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash * table containing PTEs, together with a set of 16 segment registers, to diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h index 03f45a963204..e7d67a329bd7 100644 --- a/arch/microblaze/include/asm/prom.h +++ b/arch/microblaze/include/asm/prom.h @@ -31,6 +31,21 @@ /* Other Prototypes */ extern int early_uartlite_console(void); +#ifdef CONFIG_PCI +/* + * PCI <-> OF matching functions + * (XXX should these be here?) + */ +struct pci_bus; +struct pci_dev; +extern int pci_device_from_OF_node(struct device_node *node, + u8 *bus, u8 *devfn); +extern struct device_node *pci_busdev_to_OF_node(struct pci_bus *bus, + int devfn); +extern struct device_node *pci_device_to_OF_node(struct pci_dev *dev); +extern void pci_create_OF_bus_map(void); +#endif + /* * OF address retreival & translation */ diff --git a/arch/microblaze/pci/Makefile b/arch/microblaze/pci/Makefile new file mode 100644 index 000000000000..2b8901864b23 --- /dev/null +++ b/arch/microblaze/pci/Makefile @@ -0,0 +1,5 @@ +# +# Makefile +# + +obj-$(CONFIG_PCI) += pci_32.o pci-common.o indirect_pci.o iomap.o diff --git a/arch/microblaze/pci/indirect_pci.c b/arch/microblaze/pci/indirect_pci.c new file mode 100644 index 000000000000..25f18f017f21 --- /dev/null +++ b/arch/microblaze/pci/indirect_pci.c @@ -0,0 +1,163 @@ +/* + * Support for indirect PCI bridges. + * + * Copyright (C) 1998 Gabriel Paubert. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +static int +indirect_read_config(struct pci_bus *bus, unsigned int devfn, int offset, + int len, u32 *val) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + volatile void __iomem *cfg_data; + u8 cfg_type = 0; + u32 bus_no, reg; + + if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) { + if (bus->number != hose->first_busno) + return PCIBIOS_DEVICE_NOT_FOUND; + if (devfn != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE) + if (bus->number != hose->first_busno) + cfg_type = 1; + + bus_no = (bus->number == hose->first_busno) ? + hose->self_busno : bus->number; + + if (hose->indirect_type & INDIRECT_TYPE_EXT_REG) + reg = ((offset & 0xf00) << 16) | (offset & 0xfc); + else + reg = offset & 0xfc; /* Only 3 bits for function */ + + if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN) + out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | + (devfn << 8) | reg | cfg_type)); + else + out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | + (devfn << 8) | reg | cfg_type)); + + /* + * Note: the caller has already checked that offset is + * suitably aligned and that len is 1, 2 or 4. 
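+	 * The low two bits of the offset select the byte lane within the
+	 * 32-bit data window, so 8-bit and 16-bit accesses land on the
+	 * right register bytes.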
+ */ + cfg_data = hose->cfg_data + (offset & 3); /* Only 3 bits for function */ + switch (len) { + case 1: + *val = in_8(cfg_data); + break; + case 2: + *val = in_le16(cfg_data); + break; + default: + *val = in_le32(cfg_data); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static int +indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset, + int len, u32 val) +{ + struct pci_controller *hose = pci_bus_to_host(bus); + volatile void __iomem *cfg_data; + u8 cfg_type = 0; + u32 bus_no, reg; + + if (hose->indirect_type & INDIRECT_TYPE_NO_PCIE_LINK) { + if (bus->number != hose->first_busno) + return PCIBIOS_DEVICE_NOT_FOUND; + if (devfn != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (hose->indirect_type & INDIRECT_TYPE_SET_CFG_TYPE) + if (bus->number != hose->first_busno) + cfg_type = 1; + + bus_no = (bus->number == hose->first_busno) ? + hose->self_busno : bus->number; + + if (hose->indirect_type & INDIRECT_TYPE_EXT_REG) + reg = ((offset & 0xf00) << 16) | (offset & 0xfc); + else + reg = offset & 0xfc; + + if (hose->indirect_type & INDIRECT_TYPE_BIG_ENDIAN) + out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | + (devfn << 8) | reg | cfg_type)); + else + out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | + (devfn << 8) | reg | cfg_type)); + + /* surpress setting of PCI_PRIMARY_BUS */ + if (hose->indirect_type & INDIRECT_TYPE_SURPRESS_PRIMARY_BUS) + if ((offset == PCI_PRIMARY_BUS) && + (bus->number == hose->first_busno)) + val &= 0xffffff00; + + /* Workaround for PCI_28 Errata in 440EPx/GRx */ + if ((hose->indirect_type & INDIRECT_TYPE_BROKEN_MRM) && + offset == PCI_CACHE_LINE_SIZE) { + val = 0; + } + + /* + * Note: the caller has already checked that offset is + * suitably aligned and that len is 1, 2 or 4. + */ + cfg_data = hose->cfg_data + (offset & 3); + switch (len) { + case 1: + out_8(cfg_data, val); + break; + case 2: + out_le16(cfg_data, val); + break; + default: + out_le32(cfg_data, val); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops indirect_pci_ops = { + .read = indirect_read_config, + .write = indirect_write_config, +}; + +void __init +setup_indirect_pci(struct pci_controller *hose, + resource_size_t cfg_addr, + resource_size_t cfg_data, u32 flags) +{ + resource_size_t base = cfg_addr & PAGE_MASK; + void __iomem *mbase; + + mbase = ioremap(base, PAGE_SIZE); + hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK); + if ((cfg_data & PAGE_MASK) != base) + mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE); + hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK); + hose->ops = &indirect_pci_ops; + hose->indirect_type = flags; +} diff --git a/arch/microblaze/pci/iomap.c b/arch/microblaze/pci/iomap.c new file mode 100644 index 000000000000..3fbf16f4e16c --- /dev/null +++ b/arch/microblaze/pci/iomap.c @@ -0,0 +1,39 @@ +/* + * ppc64 "iomap" interface implementation. + * + * (C) Copyright 2004 Linus Torvalds + */ +#include +#include +#include +#include +#include + +void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) +{ + resource_size_t start = pci_resource_start(dev, bar); + resource_size_t len = pci_resource_len(dev, bar); + unsigned long flags = pci_resource_flags(dev, bar); + + if (!len) + return NULL; + if (max && len > max) + len = max; + if (flags & IORESOURCE_IO) + return ioport_map(start, len); + if (flags & IORESOURCE_MEM) + return ioremap(start, len); + /* What? 
*/ + return NULL; +} +EXPORT_SYMBOL(pci_iomap); + +void pci_iounmap(struct pci_dev *dev, void __iomem *addr) +{ + if (isa_vaddr_is_ioport(addr)) + return; + if (pcibios_vaddr_is_ioport(addr)) + return; + iounmap(addr); +} +EXPORT_SYMBOL(pci_iounmap); diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 3d102dd87c9f..0b51857fbaf7 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -48,6 +48,7 @@ obj-$(CONFIG_PPC) += setup-bus.o obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o obj-$(CONFIG_X86_VISWS) += setup-irq.o obj-$(CONFIG_MN10300) += setup-bus.o +obj-$(CONFIG_MICROBLAZE) += setup-bus.o # # ACPI Related PCI FW Functions -- cgit v1.2.3 From 733cc2183116b216abb52e709709bb0e626c9a75 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 18 Jan 2010 15:27:11 +0100 Subject: microblaze: Add support for Xilinx PCI host bridge This patch is based on powerpc patch 64f16502475ddf663169369fffff6da9b10ea9fb We did some cleanups and removed powerpc parts. There is one new debug early listing function too. Exclude function is only in Debug options. We tested in on custom board. Signed-off-by: Michal Simek --- arch/microblaze/Kconfig | 4 + arch/microblaze/include/asm/pci.h | 7 ++ arch/microblaze/kernel/setup.c | 3 + arch/microblaze/pci/Makefile | 1 + arch/microblaze/pci/xilinx_pci.c | 168 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 183 insertions(+) create mode 100644 arch/microblaze/pci/xilinx_pci.c diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index e1fa0844ba4e..c1e731bb3301 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -267,6 +267,10 @@ config PCI_DOMAINS config PCI_SYSCALL def_bool PCI +config PCI_XILINX + bool "Xilinx PCI host bridge support" + depends on PCI + source "drivers/pci/Kconfig" endmenu diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index fecc04481101..bdd65aaee30d 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h @@ -166,5 +166,12 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar, extern void pcibios_setup_bus_devices(struct pci_bus *bus); extern void pcibios_setup_bus_self(struct pci_bus *bus); +/* This part of code was originaly in xilinx-pci.h */ +#ifdef CONFIG_PCI_XILINX +extern void __init xilinx_pci_init(void); +#else +static inline void __init xilinx_pci_init(void) { return; } +#endif + #endif /* __KERNEL__ */ #endif /* __ASM_MICROBLAZE_PCI_H */ diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index bc325ac4efd3..cd68e988b48c 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -64,6 +65,8 @@ void __init setup_arch(char **cmdline_p) setup_memory(); + xilinx_pci_init(); + #if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER) printk(KERN_NOTICE "Self modified code enable\n"); #endif diff --git a/arch/microblaze/pci/Makefile b/arch/microblaze/pci/Makefile index 2b8901864b23..9889cc2e1294 100644 --- a/arch/microblaze/pci/Makefile +++ b/arch/microblaze/pci/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_PCI) += pci_32.o pci-common.o indirect_pci.o iomap.o +obj-$(CONFIG_PCI_XILINX) += xilinx_pci.o diff --git a/arch/microblaze/pci/xilinx_pci.c b/arch/microblaze/pci/xilinx_pci.c new file mode 100644 index 000000000000..7869a41b0f94 --- /dev/null +++ b/arch/microblaze/pci/xilinx_pci.c @@ -0,0 +1,168 @@ +/* + * PCI support for Xilinx plbv46_pci soft-core which can be used on + * 
Xilinx Virtex ML410 / ML510 boards. + * + * Copyright 2009 Roderick Colenbrander + * Copyright 2009 Secret Lab Technologies Ltd. + * + * The pci bridge fixup code was copied from ppc4xx_pci.c and was written + * by Benjamin Herrenschmidt. + * Copyright 2007 Ben. Herrenschmidt , IBM Corp. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include +#include +#include +#include + +#define XPLB_PCI_ADDR 0x10c +#define XPLB_PCI_DATA 0x110 +#define XPLB_PCI_BUS 0x114 + +#define PCI_HOST_ENABLE_CMD (PCI_COMMAND_SERR | PCI_COMMAND_PARITY | \ + PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY) + +static struct of_device_id xilinx_pci_match[] = { + { .compatible = "xlnx,plbv46-pci-1.03.a", }, + {} +}; + +/** + * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration. + */ +static void xilinx_pci_fixup_bridge(struct pci_dev *dev) +{ + struct pci_controller *hose; + int i; + + if (dev->devfn || dev->bus->self) + return; + + hose = pci_bus_to_host(dev->bus); + if (!hose) + return; + + if (!of_match_node(xilinx_pci_match, hose->dn)) + return; + + /* Hide the PCI host BARs from the kernel as their content doesn't + * fit well in the resource management + */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + dev->resource[i].start = 0; + dev->resource[i].end = 0; + dev->resource[i].flags = 0; + } + + dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n", + pci_name(dev)); +} +DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge); + +#ifdef DEBUG +/** + * xilinx_pci_exclude_device - Don't do config access for non-root bus + * + * This is a hack. Config access to any bus other than bus 0 does not + * currently work on the ML510 so we prevent it here. + */ +static int +xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn) +{ + return (bus != 0); +} + +/** + * xilinx_early_pci_scan - List pci config space for available devices + * + * List pci devices in very early phase. 
+ */ +void __init xilinx_early_pci_scan(struct pci_controller *hose) +{ + u32 bus = 0; + u32 val, dev, func, offset; + + /* Currently we have only 2 device connected - up-to 32 devices */ + for (dev = 0; dev < 2; dev++) { + /* List only first function number - up-to 8 functions */ + for (func = 0; func < 1; func++) { + printk(KERN_INFO "%02x:%02x:%02x", bus, dev, func); + /* read the first 64 standardized bytes */ + /* Up-to 192 bytes can be list of capabilities */ + for (offset = 0; offset < 64; offset += 4) { + early_read_config_dword(hose, bus, + PCI_DEVFN(dev, func), offset, &val); + if (offset == 0 && val == 0xFFFFFFFF) { + printk(KERN_CONT "\nABSENT"); + break; + } + if (!(offset % 0x10)) + printk(KERN_CONT "\n%04x: ", offset); + + printk(KERN_CONT "%08x ", val); + } + printk(KERN_INFO "\n"); + } + } +} +#else +void __init xilinx_early_pci_scan(struct pci_controller *hose) +{ +} +#endif + +/** + * xilinx_pci_init - Find and register a Xilinx PCI host bridge + */ +void __init xilinx_pci_init(void) +{ + struct pci_controller *hose; + struct resource r; + void __iomem *pci_reg; + struct device_node *pci_node; + + pci_node = of_find_matching_node(NULL, xilinx_pci_match); + if (!pci_node) + return; + + if (of_address_to_resource(pci_node, 0, &r)) { + pr_err("xilinx-pci: cannot resolve base address\n"); + return; + } + + hose = pcibios_alloc_controller(pci_node); + if (!hose) { + pr_err("xilinx-pci: pcibios_alloc_controller() failed\n"); + return; + } + + /* Setup config space */ + setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR, + r.start + XPLB_PCI_DATA, + INDIRECT_TYPE_SET_CFG_TYPE); + + /* According to the xilinx plbv46_pci documentation the soft-core starts + * a self-init when the bus master enable bit is set. Without this bit + * set the pci bus can't be scanned. + */ + early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD); + + /* Set the max latency timer to 255 */ + early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff); + + /* Set the max bus number to 255, and bus/subbus no's to 0 */ + pci_reg = of_iomap(pci_node, 0); + out_be32(pci_reg + XPLB_PCI_BUS, 0x000000ff); + iounmap(pci_reg); + + /* Register the host bridge with the linux kernel! */ + pci_process_bridge_OF_ranges(hose, pci_node, + INDIRECT_TYPE_SET_CFG_TYPE); + + pr_info("xilinx-pci: Registered PCI host bridge\n"); + xilinx_early_pci_scan(hose); +} -- cgit v1.2.3 From 83ff56f46a8532488ee364bb93a9cb2a59490d33 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 9 Mar 2010 10:22:19 -0500 Subject: kprobes: Calculate the index correctly when freeing the out-of-line execution slot From : Ananth N Mavinakayanahalli When freeing the instruction slot, the arithmetic to calculate the index of the slot in the page needs to account for the total size of the instruction on the various architectures. Calculate the index correctly when freeing the out-of-line execution slot. 
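For illustration, a minimal stand-alone C sketch of that arithmetic. The sizes below
(4-byte opcode words, 3-word slots) are made up for the example and do not correspond
to any particular architecture; the point is only that insn_size counts kprobe_opcode_t
units while the pointer difference is in bytes, so the divisor has to be scaled by
sizeof(kprobe_opcode_t):

	#include <stdio.h>

	typedef unsigned int kprobe_opcode_t;		/* assume a 4-byte opcode word */

	int main(void)
	{
		kprobe_opcode_t insns[12];		/* toy "page" holding four 3-word slots */
		int insn_size = 3;			/* slot size, in opcode words */
		kprobe_opcode_t *slot = &insns[6];	/* start of the third slot (index 2) */

		/* Old arithmetic: a byte offset divided by a word count -> 8 (wrong) */
		long bad  = ((long)slot - (long)insns) / insn_size;
		/* Fixed arithmetic: scale the divisor to bytes -> 2 (correct) */
		long good = ((long)slot - (long)insns) /
				(insn_size * sizeof(kprobe_opcode_t));

		printf("bad idx = %ld, correct idx = %ld\n", bad, good);
		return 0;
	}

Where kprobe_opcode_t is a single byte the two expressions happen to agree, which is
why the old arithmetic could appear to work on some architectures.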
Reported-by: Sachin Sant Reported-by: Heiko Carstens Signed-off-by: Ananth N Mavinakayanahalli Signed-off-by: Masami Hiramatsu LKML-Reference: <4B9667AB.9050507@redhat.com> Signed-off-by: Ingo Molnar --- kernel/kprobes.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index fa034d29cf73..0ed46f3e51e9 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -259,7 +259,8 @@ static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c, struct kprobe_insn_page *kip; list_for_each_entry(kip, &c->pages, list) { - long idx = ((long)slot - (long)kip->insns) / c->insn_size; + long idx = ((long)slot - (long)kip->insns) / + (c->insn_size * sizeof(kprobe_opcode_t)); if (idx >= 0 && idx < slots_per_page(c)) { WARN_ON(kip->slot_used[idx] != SLOT_USED); if (dirty) { -- cgit v1.2.3 From 777537905744c28b02c283692e7f75f5445c1afa Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Tue, 12 Jan 2010 09:55:10 +0100 Subject: microblaze: Add support from PREEMPT This patch add core PREEMPT support for Microblaze. I tried to trace it via tracers and I was able to see any output. I also added low level debug functions to see if that code is called. Signed-off-by: Michal Simek --- arch/microblaze/include/asm/tlbflush.h | 2 +- arch/microblaze/kernel/asm-offsets.c | 1 + arch/microblaze/kernel/entry.S | 25 ++++++++++++++++++++++++- 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h index 10ec70cd8735..bcb8b41d55af 100644 --- a/arch/microblaze/include/asm/tlbflush.h +++ b/arch/microblaze/include/asm/tlbflush.h @@ -23,7 +23,7 @@ extern void _tlbie(unsigned long address); extern void _tlbia(void); -#define __tlbia() _tlbia() +#define __tlbia() { preempt_disable(); _tlbia(); preempt_enable(); } static inline void local_flush_tlb_all(void) { __tlbia(); } diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c index 7bc7b68f97db..0071260a672c 100644 --- a/arch/microblaze/kernel/asm-offsets.c +++ b/arch/microblaze/kernel/asm-offsets.c @@ -90,6 +90,7 @@ int main(int argc, char *argv[]) DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context)); + DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count)); BLANK(); /* struct cpu_context */ diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index 3bad4ff49471..1a6729dde49e 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S @@ -853,7 +853,30 @@ no_intr_resched: lwi r1, r1, PT_R1 - PT_SIZE; bri 6f; /* MS: Return to kernel state. 
*/ -2: VM_OFF /* MS: turn off MMU */ +2: +#ifdef CONFIG_PREEMPT + add r11, r0, CURRENT_TASK; + lwi r11, r11, TS_THREAD_INFO; + /* MS: get preempt_count from thread info */ + lwi r5, r11, TI_PREEMPT_COUNT; + bgti r5, restore; + + lwi r5, r11, TI_FLAGS; /* get flags in thread info */ + andi r5, r5, _TIF_NEED_RESCHED; + beqi r5, restore /* if zero jump over */ + +preempt: + /* interrupts are off that's why I am calling preempt_chedule_irq */ + bralid r15, preempt_schedule_irq + nop + add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ + lwi r11, r11, TS_THREAD_INFO; /* get thread info */ + lwi r5, r11, TI_FLAGS; /* get flags in thread info */ + andi r5, r5, _TIF_NEED_RESCHED; + bnei r5, preempt /* if non zero jump to resched */ +restore: +#endif + VM_OFF /* MS: turn off MMU */ tophys(r1,r1) lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ lwi r4, r1, PTO + PT_R4; -- cgit v1.2.3 From 407c1da07d5afa001ed0fdb8f379c00bbd09990a Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Tue, 12 Jan 2010 14:51:04 +0100 Subject: microblaze: Move cache function to cache.c It is better to have init cache handling on one place. Signed-off-by: Michal Simek --- arch/microblaze/kernel/cpu/cache.c | 5 +++++ arch/microblaze/kernel/setup.c | 6 ------ 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c index 2a56bccce4e0..5425f89335ed 100644 --- a/arch/microblaze/kernel/cpu/cache.c +++ b/arch/microblaze/kernel/cpu/cache.c @@ -532,4 +532,9 @@ void microblaze_cache_init(void) } } } + invalidate_dcache(); + enable_dcache(); + + invalidate_icache(); + enable_icache(); } diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c index cd68e988b48c..f974ec7aa357 100644 --- a/arch/microblaze/kernel/setup.c +++ b/arch/microblaze/kernel/setup.c @@ -57,12 +57,6 @@ void __init setup_arch(char **cmdline_p) microblaze_cache_init(); - invalidate_dcache(); - enable_dcache(); - - invalidate_icache(); - enable_icache(); - setup_memory(); xilinx_pci_init(); -- cgit v1.2.3 From 7ae5f21361fea11f58c398701da635f778635d13 Mon Sep 17 00:00:00 2001 From: John Kacur Date: Thu, 11 Mar 2010 13:57:00 +0100 Subject: perf: Make the install relative to DESTDIR if specified Without this change, the install path is relative to prefix/DESTDIR where prefix is automatically set to $HOME. This can produce unexpected results. For example: make -C tools/perf DESTDIR=/home/jkacur/tmp install-man creates the directory: /home/jkacur/home/jkacur/tmp/share/... instead of the expected: /home/jkacur/tmp/share/... 
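The change itself is only a guard around the prefix default. A stripped-down GNU Make
sketch of the resulting behaviour (the 'show' target is hypothetical; the variable
names follow the perf Makefiles, and the recipe line must start with a tab):

	# Only default prefix to $(HOME) when no DESTDIR was given.
	ifndef DESTDIR
	prefix ?= $(HOME)
	endif
	mandir ?= $(prefix)/share/man

	show:
		@echo "man pages install under: $(DESTDIR)$(mandir)"

With this sketch, 'make show DESTDIR=/home/jkacur/tmp' prints /home/jkacur/tmp/share/man.
Without the ifndef guard, prefix still defaults to $(HOME) even when DESTDIR is given,
so the man pages end up under an extra $HOME path component instead of directly under
DESTDIR, which is the doubled-up path shown above.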
Signed-off-by: John Kacur Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Tom Zanussi Cc: Kyle McMartin Cc: LKML-Reference: <1268312220-12880-1-git-send-email-jkacur@redhat.com> Signed-off-by: Ingo Molnar --- tools/perf/Documentation/Makefile | 4 +++- tools/perf/Makefile | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile index bdd3b7ecad0a..bd498d496952 100644 --- a/tools/perf/Documentation/Makefile +++ b/tools/perf/Documentation/Makefile @@ -24,7 +24,10 @@ DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT)) DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT)) DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT)) +# Make the path relative to DESTDIR, not prefix +ifndef DESTDIR prefix?=$(HOME) +endif bindir?=$(prefix)/bin htmldir?=$(prefix)/share/doc/perf-doc pdfdir?=$(prefix)/share/doc/perf-doc @@ -32,7 +35,6 @@ mandir?=$(prefix)/share/man man1dir=$(mandir)/man1 man5dir=$(mandir)/man5 man7dir=$(mandir)/man7 -# DESTDIR= ASCIIDOC=asciidoc ASCIIDOC_EXTRA = --unsafe diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 5840499e2d22..8a8f52db7e38 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -216,7 +216,10 @@ STRIP ?= strip # runtime figures out where they are based on the path to the executable. # This can help installing the suite in a relocatable way. +# Make the path relative to DESTDIR, not to prefix +ifndef DESTDIR prefix = $(HOME) +endif bindir_relative = bin bindir = $(prefix)/$(bindir_relative) mandir = share/man @@ -233,7 +236,6 @@ sysconfdir = $(prefix)/etc ETC_PERFCONFIG = etc/perfconfig endif lib = lib -# DESTDIR= export prefix bindir sharedir sysconfdir -- cgit v1.2.3 From d79f3b06a9e40b382bd5d5ae8dea9b3210eda9ce Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 8 Feb 2010 12:13:10 +0100 Subject: microblaze: Preliminary support for dma drivers I found several problems for ll_temac driver and on system with WB. This early fix should fix it. I will clean this patch before I will add it to mainline Signed-off-by: Michal Simek --- arch/microblaze/include/asm/io.h | 3 --- arch/microblaze/include/asm/page.h | 12 +++++------- arch/microblaze/kernel/dma.c | 18 ++++++++++++------ 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h index 06d804b15a51..32d621a56aee 100644 --- a/arch/microblaze/include/asm/io.h +++ b/arch/microblaze/include/asm/io.h @@ -140,9 +140,6 @@ static inline void writel(unsigned int v, volatile void __iomem *addr) #define virt_to_phys(addr) ((unsigned long)__virt_to_phys(addr)) #define virt_to_bus(addr) ((unsigned long)__virt_to_phys(addr)) -#define __page_address(page) \ - (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)) -#define page_to_phys(page) virt_to_phys((void *)__page_address(page)) #define page_to_bus(page) (page_to_phys(page)) #define bus_to_virt(addr) (phys_to_virt(addr)) diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h index 9b66c0fa9a32..2dd1d04129e0 100644 --- a/arch/microblaze/include/asm/page.h +++ b/arch/microblaze/include/asm/page.h @@ -61,12 +61,6 @@ extern unsigned int __page_offset; */ #define PAGE_OFFSET CONFIG_KERNEL_START -/* - * MAP_NR -- given an address, calculate the index of the page struct which - * points to the address's page. - */ -#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT) - /* * The basic type of a PTE - 32 bit physical addressing. 
*/ @@ -154,7 +148,11 @@ extern int page_is_ram(unsigned long pfn); # define pfn_to_virt(pfn) __va(pfn_to_phys((pfn))) # ifdef CONFIG_MMU -# define virt_to_page(kaddr) (mem_map + MAP_NR(kaddr)) + +# define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) +# define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) +# define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) + # else /* CONFIG_MMU */ # define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) # define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index 64bc39f40ba7..f230a8de0bcd 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -21,10 +21,10 @@ * default the offset is PCI_DRAM_OFFSET. */ -static inline void __dma_sync_page(void *vaddr, unsigned long offset, +static inline void __dma_sync_page(void *paddr, unsigned long offset, size_t size, enum dma_data_direction direction) { - unsigned long start = virt_to_phys(vaddr); + unsigned long start = (unsigned long)paddr; switch (direction) { case DMA_TO_DEVICE: @@ -79,10 +79,12 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, struct scatterlist *sg; int i; + /* FIXME this part of code is untested */ for_each_sg(sgl, sg, nents, i) { sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev); sg->dma_length = sg->length; - __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); + __dma_sync_page(page_to_phys(sg_page(sg)), sg->offset, + sg->length, direction); } return nents; @@ -107,7 +109,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev, struct dma_attrs *attrs) { BUG_ON(direction == DMA_NONE); - __dma_sync_page(page, offset, size, direction); + __dma_sync_page(page_to_phys(page), offset, size, direction); return page_to_phys(page) + offset + get_dma_direct_offset(dev); } @@ -117,8 +119,12 @@ static inline void dma_direct_unmap_page(struct device *dev, enum dma_data_direction direction, struct dma_attrs *attrs) { -/* There is not necessary to do cache cleanup */ - /* __dma_sync_page(dma_address, 0 , size, direction); */ +/* There is not necessary to do cache cleanup + * + * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and + * dma_address is physical address + */ + __dma_sync_page((void *)dma_address, 0 , size, direction); } struct dma_map_ops dma_direct_ops = { -- cgit v1.2.3 From 137d0795a72786fa33e6900cb2ac2eae81f4b6ee Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 4 Feb 2010 11:42:24 +0100 Subject: microblaze: Change temp register for cmdline For copy was used r7 register when CONFIG_CMDLINE_BOOL option is enabled. But r7 stores pointer to fdt that's why machine_early_init not detect compiled-in DTB. 
I also moved kernel PID setup to have TLB init in one block Signed-off-by: Michal Simek --- arch/microblaze/kernel/head.S | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S index 30916193fcc7..cb7815cfe5ab 100644 --- a/arch/microblaze/kernel/head.S +++ b/arch/microblaze/kernel/head.S @@ -99,8 +99,8 @@ no_fdt_arg: tophys(r4,r4) /* convert to phys address */ ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ _copy_command_line: - lbu r7, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */ - sb r7, r4, r6 /* addr[r4+r6]= r7*/ + lbu r2, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */ + sb r2, r4, r6 /* addr[r4+r6]= r7*/ addik r6, r6, 1 /* increment counting */ bgtid r3, _copy_command_line /* loop for all entries */ addik r3, r3, -1 /* descrement loop */ @@ -136,6 +136,11 @@ _invalidate: addik r3, r3, -1 /* sync */ + /* Setup the kernel PID */ + mts rpid,r0 /* Load the kernel PID */ + nop + bri 4 + /* * We should still be executing code at physical address area * RAM_BASEADDR at this point. However, kernel code is at @@ -146,10 +151,6 @@ _invalidate: addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */ tophys(r4,r3) /* Load the kernel physical address */ - mts rpid,r0 /* Load the kernel PID */ - nop - bri 4 - /* * Configure and load two entries into TLB slots 0 and 1. * In case we are pinning TLBs, these are reserved in by the -- cgit v1.2.3 From e786c6ad2ba7f1f9b9cc9d75d5e7ba7d9cdc550a Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 8 Feb 2010 14:48:16 +0100 Subject: microblaze: Use generic show_mem() Remove arch-specific show_mem() in favor of the generic version. It is based on powerpc patch. bda2fa535564ace56a395d5b65c6dc81305401fa Signed-off-by: Michal Simek --- arch/microblaze/mm/init.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 6eea5544ad8b..aa6e163411b5 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -193,12 +193,6 @@ void free_initmem(void) (unsigned long)(&__init_end)); } -/* FIXME from arch/powerpc/mm/mem.c*/ -void show_mem(void) -{ - printk(KERN_NOTICE "%s\n", __func__); -} - void __init mem_init(void) { high_memory = (void *)__va(memory_end); -- cgit v1.2.3 From 4c912c1a33abb67aefecb5ed8bd73d91887c4977 Mon Sep 17 00:00:00 2001 From: Frans Pop Date: Sat, 6 Feb 2010 18:47:12 +0100 Subject: microblaze: remove trailing space in messages Signed-off-by: Frans Pop Cc: microblaze-uclinux@itee.uq.edu.au Cc: Michal Simek Signed-off-by: Michal Simek --- arch/microblaze/kernel/cpu/cache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c index 5425f89335ed..13f0c1de3234 100644 --- a/arch/microblaze/kernel/cpu/cache.c +++ b/arch/microblaze/kernel/cpu/cache.c @@ -493,7 +493,7 @@ const struct scache wt_nomsr_noirq = { #define CPUVER_7_20_A 0x0c #define CPUVER_7_20_D 0x0f -#define INFO(s) printk(KERN_INFO "cache: " s " \n"); +#define INFO(s) printk(KERN_INFO "cache: " s "\n"); void microblaze_cache_init(void) { -- cgit v1.2.3 From dcbae4be907488df5e1cc8a89b7df1a0565c257c Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Tue, 9 Feb 2010 09:25:08 +0100 Subject: microblaze: Preliminary support for dma drivers I found several problems for ll_temac driver and on system with WB. This early fix should fix it. 
I will clean this patch before I will add it to mainline Signed-off-by: Michal Simek --- arch/microblaze/kernel/dma.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index f230a8de0bcd..fbe1e8184eff 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -20,18 +20,15 @@ * can set archdata.dma_data to an unsigned long holding the offset. By * default the offset is PCI_DRAM_OFFSET. */ - -static inline void __dma_sync_page(void *paddr, unsigned long offset, +static inline void __dma_sync_page(unsigned long paddr, unsigned long offset, size_t size, enum dma_data_direction direction) { - unsigned long start = (unsigned long)paddr; - switch (direction) { case DMA_TO_DEVICE: - flush_dcache_range(start + offset, start + offset + size); + flush_dcache_range(paddr + offset, paddr + offset + size); break; case DMA_FROM_DEVICE: - invalidate_dcache_range(start + offset, start + offset + size); + invalidate_dcache_range(paddr + offset, paddr + offset + size); break; default: BUG(); -- cgit v1.2.3 From 22607a28213068af113b46862eafa785f00a482e Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 15 Feb 2010 16:41:40 +0100 Subject: microblaze: Add define for ASM_LOOP It is default option but both options must be measured. Signed-off-by: Michal Simek --- arch/microblaze/kernel/cpu/cache.c | 204 +++++++++++++++++++++++++++++-------- 1 file changed, 161 insertions(+), 43 deletions(-) diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c index 13f0c1de3234..f04d8a86dead 100644 --- a/arch/microblaze/kernel/cpu/cache.c +++ b/arch/microblaze/kernel/cpu/cache.c @@ -15,25 +15,6 @@ #include #include -static inline void __invalidate_flush_icache(unsigned int addr) -{ - __asm__ __volatile__ ("wic %0, r0;" \ - : : "r" (addr)); -} - -static inline void __flush_dcache(unsigned int addr) -{ - __asm__ __volatile__ ("wdc.flush %0, r0;" \ - : : "r" (addr)); -} - -static inline void __invalidate_dcache(unsigned int baseaddr, - unsigned int offset) -{ - __asm__ __volatile__ ("wdc.clear %0, %1;" \ - : : "r" (baseaddr), "r" (offset)); -} - static inline void __enable_icache_msr(void) { __asm__ __volatile__ (" msrset r0, %0; \ @@ -148,9 +129,9 @@ do { \ int step = -line_length; \ BUG_ON(step >= 0); \ \ - __asm__ __volatile__ (" 1: " #op " r0, %0; \ - bgtid %0, 1b; \ - addk %0, %0, %1; \ + __asm__ __volatile__ (" 1: " #op " r0, %0; \ + bgtid %0, 1b; \ + addk %0, %0, %1; \ " : : "r" (len), "r" (step) \ : "memory"); \ } while (0); @@ -162,9 +143,9 @@ do { \ int count = end - start; \ BUG_ON(count <= 0); \ \ - __asm__ __volatile__ (" 1: " #op " %0, %1; \ - bgtid %1, 1b; \ - addk %1, %1, %2; \ + __asm__ __volatile__ (" 1: " #op " %0, %1; \ + bgtid %1, 1b; \ + addk %1, %1, %2; \ " : : "r" (start), "r" (count), \ "r" (step) : "memory"); \ } while (0); @@ -175,7 +156,7 @@ do { \ int volatile temp; \ BUG_ON(end - start <= 0); \ \ - __asm__ __volatile__ (" 1: " #op " %1, r0; \ + __asm__ __volatile__ (" 1: " #op " %1, r0; \ cmpu %0, %1, %2; \ bgtid %0, 1b; \ addk %1, %1, %3; \ @@ -183,10 +164,14 @@ do { \ "r" (line_length) : "memory"); \ } while (0); +#define ASM_LOOP + static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) { unsigned long flags; - +#ifndef ASM_LOOP + int i; +#endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); @@ -196,8 +181,13 @@ static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end) 
local_irq_save(flags); __disable_icache_msr(); +#ifdef ASM_LOOP CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); - +#else + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wic %0, r0;" \ + : : "r" (i)); +#endif __enable_icache_msr(); local_irq_restore(flags); } @@ -206,7 +196,9 @@ static void __flush_icache_range_nomsr_irq(unsigned long start, unsigned long end) { unsigned long flags; - +#ifndef ASM_LOOP + int i; +#endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); @@ -216,7 +208,13 @@ static void __flush_icache_range_nomsr_irq(unsigned long start, local_irq_save(flags); __disable_icache_nomsr(); +#ifdef ASM_LOOP CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); +#else + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wic %0, r0;" \ + : : "r" (i)); +#endif __enable_icache_nomsr(); local_irq_restore(flags); @@ -225,25 +223,41 @@ static void __flush_icache_range_nomsr_irq(unsigned long start, static void __flush_icache_range_noirq(unsigned long start, unsigned long end) { +#ifndef ASM_LOOP + int i; +#endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); CACHE_LOOP_LIMITS(start, end, cpuinfo.icache_line_length, cpuinfo.icache_size); +#ifdef ASM_LOOP CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic); +#else + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wic %0, r0;" \ + : : "r" (i)); +#endif } static void __flush_icache_all_msr_irq(void) { unsigned long flags; - +#ifndef ASM_LOOP + int i; +#endif pr_debug("%s\n", __func__); local_irq_save(flags); __disable_icache_msr(); - +#ifdef ASM_LOOP CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); - +#else + for (i = 0; i < cpuinfo.icache_size; + i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wic %0, r0;" \ + : : "r" (i)); +#endif __enable_icache_msr(); local_irq_restore(flags); } @@ -251,35 +265,59 @@ static void __flush_icache_all_msr_irq(void) static void __flush_icache_all_nomsr_irq(void) { unsigned long flags; - +#ifndef ASM_LOOP + int i; +#endif pr_debug("%s\n", __func__); local_irq_save(flags); __disable_icache_nomsr(); - +#ifdef ASM_LOOP CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); - +#else + for (i = 0; i < cpuinfo.icache_size; + i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wic %0, r0;" \ + : : "r" (i)); +#endif __enable_icache_nomsr(); local_irq_restore(flags); } static void __flush_icache_all_noirq(void) { +#ifndef ASM_LOOP + int i; +#endif pr_debug("%s\n", __func__); +#ifdef ASM_LOOP CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic); +#else + for (i = 0; i < cpuinfo.icache_size; + i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wic %0, r0;" \ + : : "r" (i)); +#endif } static void __invalidate_dcache_all_msr_irq(void) { unsigned long flags; - +#ifndef ASM_LOOP + int i; +#endif pr_debug("%s\n", __func__); local_irq_save(flags); __disable_dcache_msr(); - +#ifdef ASM_LOOP CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); - +#else + for (i = 0; i < cpuinfo.dcache_size; + i += cpuinfo.dcache_line_length) + __asm__ __volatile__ ("wdc %0, r0;" \ + : : "r" (i)); +#endif __enable_dcache_msr(); local_irq_restore(flags); } @@ -287,60 +325,107 @@ static void __invalidate_dcache_all_msr_irq(void) static void __invalidate_dcache_all_nomsr_irq(void) { unsigned long flags; - +#ifndef ASM_LOOP + int i; 
+#endif pr_debug("%s\n", __func__); local_irq_save(flags); __disable_dcache_nomsr(); - +#ifdef ASM_LOOP CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc); - +#else + for (i = 0; i < cpuinfo.dcache_size; + i += cpuinfo.dcache_line_length) + __asm__ __volatile__ ("wdc %0, r0;" \ + : : "r" (i)); +#endif __enable_dcache_nomsr(); local_irq_restore(flags); } static void __invalidate_dcache_all_noirq_wt(void) { +#ifndef ASM_LOOP + int i; +#endif pr_debug("%s\n", __func__); +#ifdef ASM_LOOP CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc) +#else + for (i = 0; i < cpuinfo.dcache_size; + i += cpuinfo.dcache_line_length) + __asm__ __volatile__ ("wdc %0, r0;" \ + : : "r" (i)); +#endif } /* FIXME this is weird - should be only wdc but not work * MS: I am getting bus errors and other weird things */ static void __invalidate_dcache_all_wb(void) { +#ifndef ASM_LOOP + int i; +#endif pr_debug("%s\n", __func__); +#ifdef ASM_LOOP CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc.clear) +#else + for (i = 0; i < cpuinfo.dcache_size; + i += cpuinfo.dcache_line_length) + __asm__ __volatile__ ("wdc.clear %0, r0;" \ + : : "r" (i)); +#endif } static void __invalidate_dcache_range_wb(unsigned long start, unsigned long end) { +#ifndef ASM_LOOP + int i; +#endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); CACHE_LOOP_LIMITS(start, end, cpuinfo.dcache_line_length, cpuinfo.dcache_size); +#ifdef ASM_LOOP CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear); +#else + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wdc.clear %0, r0;" \ + : : "r" (i)); +#endif } static void __invalidate_dcache_range_nomsr_wt(unsigned long start, unsigned long end) { +#ifndef ASM_LOOP + int i; +#endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); CACHE_LOOP_LIMITS(start, end, cpuinfo.dcache_line_length, cpuinfo.dcache_size); +#ifdef ASM_LOOP CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); +#else + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wdc %0, r0;" \ + : : "r" (i)); +#endif } static void __invalidate_dcache_range_msr_irq_wt(unsigned long start, unsigned long end) { unsigned long flags; - +#ifndef ASM_LOOP + int i; +#endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); CACHE_LOOP_LIMITS(start, end, @@ -349,7 +434,13 @@ static void __invalidate_dcache_range_msr_irq_wt(unsigned long start, local_irq_save(flags); __disable_dcache_msr(); +#ifdef ASM_LOOP CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); +#else + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wdc %0, r0;" \ + : : "r" (i)); +#endif __enable_dcache_msr(); local_irq_restore(flags); @@ -359,7 +450,9 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start, unsigned long end) { unsigned long flags; - +#ifndef ASM_LOOP + int i; +#endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); @@ -369,7 +462,13 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start, local_irq_save(flags); __disable_dcache_nomsr(); +#ifdef ASM_LOOP CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc); +#else + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wdc %0, r0;" \ + : : "r" (i)); +#endif __enable_dcache_nomsr(); local_irq_restore(flags); @@ 
-377,19 +476,38 @@ static void __invalidate_dcache_range_nomsr_irq(unsigned long start, static void __flush_dcache_all_wb(void) { +#ifndef ASM_LOOP + int i; +#endif pr_debug("%s\n", __func__); +#ifdef ASM_LOOP CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc.flush); +#else + for (i = 0; i < cpuinfo.dcache_size; + i += cpuinfo.dcache_line_length) + __asm__ __volatile__ ("wdc.flush %0, r0;" \ + : : "r" (i)); +#endif } static void __flush_dcache_range_wb(unsigned long start, unsigned long end) { +#ifndef ASM_LOOP + int i; +#endif pr_debug("%s: start 0x%x, end 0x%x\n", __func__, (unsigned int)start, (unsigned int) end); CACHE_LOOP_LIMITS(start, end, cpuinfo.dcache_line_length, cpuinfo.dcache_size); +#ifdef ASM_LOOP CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush); +#else + for (i = start; i < end; i += cpuinfo.icache_line_length) + __asm__ __volatile__ ("wdc.flush %0, r0;" \ + : : "r" (i)); +#endif } /* struct for wb caches and for wt caches */ -- cgit v1.2.3 From cca5613f0278fb0ae0aba285a496add55d0cabab Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 22 Feb 2010 11:27:27 +0100 Subject: microblaze: Remove VMALLOC_VMADDR Signed-off-by: Michal Simek --- arch/microblaze/include/asm/pgtable.h | 1 - arch/microblaze/mm/pgtable.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index 1c47f6f8bfb6..e8d25fb8847c 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -85,7 +85,6 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } #define VMALLOC_START (CONFIG_KERNEL_START + \ max(32 * 1024 * 1024UL, memory_size)) #define VMALLOC_END ioremap_bot -#define VMALLOC_VMADDR(x) ((unsigned long)(x)) #endif /* __ASSEMBLY__ */ diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c index 2820081b21ab..63a6fd07c48f 100644 --- a/arch/microblaze/mm/pgtable.c +++ b/arch/microblaze/mm/pgtable.c @@ -103,7 +103,7 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size, area = get_vm_area(size, VM_IOREMAP); if (area == NULL) return NULL; - v = VMALLOC_VMADDR(area->addr); + v = (unsigned long) area->addr; } else { v = (ioremap_bot -= size); } -- cgit v1.2.3 From b8a84059b5c3a3b9ba3973dcdab1e1d9cc4975e0 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 22 Feb 2010 11:33:07 +0100 Subject: microblaze: Remove ancient Kconfig option for consistent mapping We don't use CONSISTENT option from Kconfig that's why I am removing them. Signed-off-by: Michal Simek --- arch/microblaze/Kconfig | 33 ++------------------------------- 1 file changed, 2 insertions(+), 31 deletions(-) diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index c1e731bb3301..e451c0898e7a 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -145,7 +145,6 @@ menu "Advanced setup" config ADVANCED_OPTIONS bool "Prompt for advanced kernel configuration options" - depends on MMU help This option will enable prompting for a variety of advanced kernel configuration options. 
These options can cause the kernel to not @@ -174,7 +173,7 @@ config HIGHMEM_START config LOWMEM_SIZE_BOOL bool "Set maximum low memory" - depends on ADVANCED_OPTIONS + depends on ADVANCED_OPTIONS && MMU help This option allows you to set the maximum amount of memory which will be used as "low memory", that is, memory which the kernel can @@ -186,7 +185,6 @@ config LOWMEM_SIZE_BOOL config LOWMEM_SIZE hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL - depends on MMU default "0x30000000" config KERNEL_START_BOOL @@ -207,7 +205,7 @@ config KERNEL_START config TASK_SIZE_BOOL bool "Set custom user task size" - depends on ADVANCED_OPTIONS + depends on ADVANCED_OPTIONS && MMU help This option allows you to set the amount of virtual address space allocated to user tasks. This can be useful in optimizing the @@ -217,35 +215,8 @@ config TASK_SIZE_BOOL config TASK_SIZE hex "Size of user task space" if TASK_SIZE_BOOL - depends on MMU default "0x80000000" -config CONSISTENT_START_BOOL - bool "Set custom consistent memory pool address" - depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE - help - This option allows you to set the base virtual address - of the the consistent memory pool. This pool of virtual - memory is used to make consistent memory allocations. - -config CONSISTENT_START - hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL - depends on MMU - default "0xff100000" if NOT_COHERENT_CACHE - -config CONSISTENT_SIZE_BOOL - bool "Set custom consistent memory pool size" - depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE - help - This option allows you to set the size of the the - consistent memory pool. This pool of virtual memory - is used to make consistent memory allocations. - -config CONSISTENT_SIZE - hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL - depends on MMU - default "0x00200000" if NOT_COHERENT_CACHE - endmenu source "mm/Kconfig" -- cgit v1.2.3 From ae8ee1505162f47f8b8cf7a44c26ea6b172e1445 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 22 Feb 2010 12:09:02 +0100 Subject: microblaze: pgtable.h: move consistent functions Consistent functions will be used for noMMU and MMU kernels. Signed-off-by: Michal Simek --- arch/microblaze/include/asm/pgtable.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index e8d25fb8847c..adcc3320e52a 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -68,7 +68,6 @@ static inline int pte_file(pte_t pte) { return 0; } extern unsigned long va_to_phys(unsigned long address); extern pte_t *va_to_pte(unsigned long address); -extern unsigned long ioremap_bot, ioremap_base; /* * The following only work if pte_present() is true. 
@@ -411,7 +410,7 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr, mts rmsr, %2\n\ nop" : "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p) - : "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p) + : "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p) : "cc"); return old; @@ -580,18 +579,11 @@ void mapin_ram(void); int map_page(unsigned long va, phys_addr_t pa, int flags); extern int mem_init_done; -extern unsigned long ioremap_base; -extern unsigned long ioremap_bot; asmlinkage void __init mmu_init(void); void __init *early_get_page(void); -void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle); -void consistent_free(void *vaddr); -void consistent_sync(void *vaddr, size_t size, int direction); -void consistent_sync_page(struct page *page, unsigned long offset, - size_t size, int direction); #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ @@ -600,6 +592,14 @@ void consistent_sync_page(struct page *page, unsigned long offset, #ifndef __ASSEMBLY__ #include +extern unsigned long ioremap_bot, ioremap_base; + +void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle); +void consistent_free(void *vaddr); +void consistent_sync(void *vaddr, size_t size, int direction); +void consistent_sync_page(struct page *page, unsigned long offset, + size_t size, int direction); + void setup_memory(void); #endif /* __ASSEMBLY__ */ -- cgit v1.2.3 From 3a0d7a4dd5b3a6545e5764735b48ab84e64af723 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 22 Feb 2010 12:16:08 +0100 Subject: microblaze: Add consistent code Remove ancient Kconfig option for consistent code. MMU uses cache inhibit pages. noMMU uses UNCACHE SHADOW feature where is used double ram size. For example: Physical ram is 256MB and cache are setup to cover the same size. But if you setup in HW that size is 512MB and cache covers 256MB than you can use adresses from 256-512MB without caches and correspond with 0-256MB with cache. That's why I am using dcache base/high addresses to find out uncache area. Signed-off-by: Michal Simek --- arch/microblaze/Kconfig | 9 ++ arch/microblaze/mm/Makefile | 2 +- arch/microblaze/mm/consistent.c | 246 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 256 insertions(+), 1 deletion(-) create mode 100644 arch/microblaze/mm/consistent.c diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index e451c0898e7a..203ec61c6d4c 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -156,6 +156,15 @@ config ADVANCED_OPTIONS comment "Default settings for advanced configuration options are used" depends on !ADVANCED_OPTIONS +config XILINX_UNCACHED_SHADOW + bool "Are you using uncached shadow for RAM ?" + depends on ADVANCED_OPTIONS && !MMU + default n + help + This is needed to be able to allocate uncachable memory regions. + The feature requires the design to define the RAM memory controller + window to be twice as large as the actual physical memory. 
+ config HIGHMEM_START_BOOL bool "Set high memory pool address" depends on ADVANCED_OPTIONS && HIGHMEM diff --git a/arch/microblaze/mm/Makefile b/arch/microblaze/mm/Makefile index 6c8a924d9e26..09c49ed87235 100644 --- a/arch/microblaze/mm/Makefile +++ b/arch/microblaze/mm/Makefile @@ -2,6 +2,6 @@ # Makefile # -obj-y := init.o +obj-y := consistent.o init.o obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c new file mode 100644 index 000000000000..a9b443e3fb98 --- /dev/null +++ b/arch/microblaze/mm/consistent.c @@ -0,0 +1,246 @@ +/* + * Microblaze support for cache consistent memory. + * Copyright (C) 2010 Michal Simek + * Copyright (C) 2010 PetaLogix + * Copyright (C) 2005 John Williams + * + * Based on PowerPC version derived from arch/arm/mm/consistent.c + * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) + * Copyright (C) 2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef CONFIG_MMU + +/* I have to use dcache values because I can't relate on ram size */ +#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1) + +/* + * Consistent memory allocators. Used for DMA devices that want to + * share uncached memory with the processor core. + * My crufty no-MMU approach is simple. In the HW platform we can optionally + * mirror the DDR up above the processor cacheable region. So, memory accessed + * in this mirror region will not be cached. It's alloced from the same + * pool as normal memory, but the handle we return is shifted up into the + * uncached region. This will no doubt cause big problems if memory allocated + * here is not also freed properly. -- JW + */ +void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle) +{ + struct page *page, *end, *free; + unsigned long order; + void *ret, *virt; + + if (in_interrupt()) + BUG(); + + size = PAGE_ALIGN(size); + order = get_order(size); + + page = alloc_pages(gfp, order); + if (!page) + goto no_page; + + /* We could do with a page_to_phys and page_to_bus here. */ + virt = page_address(page); + ret = ioremap(virt_to_phys(virt), size); + if (!ret) + goto no_remap; + + /* + * Here's the magic! Note if the uncached shadow is not implemented, + * it's up to the calling code to also test that condition and make + * other arranegments, such as manually flushing the cache and so on. + */ +#ifdef CONFIG_XILINX_UNCACHED_SHADOW + ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK); +#endif + /* dma_handle is same as physical (shadowed) address */ + *dma_handle = (dma_addr_t)ret; + + /* + * free wasted pages. We skip the first page since we know + * that it will have count = 1 and won't require freeing. + * We also mark the pages in use as reserved so that + * remap_page_range works. 
+ */ + page = virt_to_page(virt); + free = page + (size >> PAGE_SHIFT); + end = page + (1 << order); + + for (; page < end; page++) { + init_page_count(page); + if (page >= free) + __free_page(page); + else + SetPageReserved(page); + } + + return ret; +no_remap: + __free_pages(page, order); +no_page: + return NULL; +} + +#else + +void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle) +{ + int order, err, i; + unsigned long page, va, flags; + phys_addr_t pa; + struct vm_struct *area; + void *ret; + + if (in_interrupt()) + BUG(); + + /* Only allocate page size areas. */ + size = PAGE_ALIGN(size); + order = get_order(size); + + page = __get_free_pages(gfp, order); + if (!page) { + BUG(); + return NULL; + } + + /* + * we need to ensure that there are no cachelines in use, + * or worse dirty in this area. + */ + flush_dcache_range(virt_to_phys(page), virt_to_phys(page) + size); + + /* Allocate some common virtual space to map the new pages. */ + area = get_vm_area(size, VM_ALLOC); + if (area == NULL) { + free_pages(page, order); + return NULL; + } + va = (unsigned long) area->addr; + ret = (void *)va; + + /* This gives us the real physical address of the first page. */ + *dma_handle = pa = virt_to_bus((void *)page); + + /* MS: This is the whole magic - use cache inhibit pages */ + flags = _PAGE_KERNEL | _PAGE_NO_CACHE; + + /* + * Set refcount=1 on all pages in an order>0 + * allocation so that vfree() will actually + * free all pages that were allocated. + */ + if (order > 0) { + struct page *rpage = virt_to_page(page); + for (i = 1; i < (1 << order); i++) + init_page_count(rpage+i); + } + + err = 0; + for (i = 0; i < size && err == 0; i += PAGE_SIZE) + err = map_page(va+i, pa+i, flags); + + if (err) { + vfree((void *)va); + return NULL; + } + + return ret; +} +#endif /* CONFIG_MMU */ +EXPORT_SYMBOL(consistent_alloc); + +/* + * free page(s) as defined by the above mapping. + */ +void consistent_free(void *vaddr) +{ + if (in_interrupt()) + BUG(); + + /* Clear SHADOW_MASK bit in address, and free as per usual */ +#ifdef CONFIG_XILINX_UNCACHED_SHADOW + vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK); +#endif + vfree(vaddr); +} +EXPORT_SYMBOL(consistent_free); + +/* + * make an area consistent. + */ +void consistent_sync(void *vaddr, size_t size, int direction) +{ + unsigned long start; + unsigned long end; + + start = (unsigned long)vaddr; + + /* Convert start address back down to unshadowed memory region */ +#ifdef CONFIG_XILINX_UNCACHED_SHADOW + start &= ~UNCACHED_SHADOW_MASK; +#endif + end = start + size; + + switch (direction) { + case PCI_DMA_NONE: + BUG(); + case PCI_DMA_FROMDEVICE: /* invalidate only */ + flush_dcache_range(start, end); + break; + case PCI_DMA_TODEVICE: /* writeback only */ + flush_dcache_range(start, end); + break; + case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */ + flush_dcache_range(start, end); + break; + } +} +EXPORT_SYMBOL(consistent_sync); + +/* + * consistent_sync_page makes memory consistent. 
identical + * to consistent_sync, but takes a struct page instead of a + * virtual address + */ +void consistent_sync_page(struct page *page, unsigned long offset, + size_t size, int direction) +{ + unsigned long start = (unsigned long)page_address(page) + offset; + consistent_sync((void *)start, size, direction); +} +EXPORT_SYMBOL(consistent_sync_page); -- cgit v1.2.3 From 1be53e084a5bd8f59850348e1066d25aa0200031 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 11 Mar 2010 14:15:48 +0100 Subject: microblaze: Fix dma alloc and free coherent dma functions We have to use consistent code to be able to do coherent dma function. In consistent code is used cache inhibit page mapping. Xilinx reported that there is bug in Microblaze for WB and d-cache_always use option. Microblaze 7.30.a should be first version where is this bug removed. Signed-off-by: Michal Simek --- arch/microblaze/kernel/dma.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index fbe1e8184eff..b1084974fccd 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -43,9 +43,14 @@ static unsigned long get_dma_direct_offset(struct device *dev) return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */ } -void *dma_direct_alloc_coherent(struct device *dev, size_t size, +#define NOT_COHERENT_CACHE + +static void *dma_direct_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { +#ifdef NOT_COHERENT_CACHE + return consistent_alloc(flag, size, dma_handle); +#else void *ret; struct page *page; int node = dev_to_node(dev); @@ -61,12 +66,17 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size, *dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev); return ret; +#endif } -void dma_direct_free_coherent(struct device *dev, size_t size, +static void dma_direct_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { +#ifdef NOT_COHERENT_CACHE + consistent_free(vaddr); +#else free_pages((unsigned long)vaddr, get_order(size)); +#endif } static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, @@ -105,7 +115,6 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev, enum dma_data_direction direction, struct dma_attrs *attrs) { - BUG_ON(direction == DMA_NONE); __dma_sync_page(page_to_phys(page), offset, size, direction); return page_to_phys(page) + offset + get_dma_direct_offset(dev); } @@ -121,7 +130,7 @@ static inline void dma_direct_unmap_page(struct device *dev, * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and * dma_address is physical address */ - __dma_sync_page((void *)dma_address, 0 , size, direction); + __dma_sync_page(dma_address, 0 , size, direction); } struct dma_map_ops dma_direct_ops = { -- cgit v1.2.3 From 79bf3a137617e6deeac411c39f1660b7e91d6348 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Wed, 20 Jan 2010 15:17:08 +0100 Subject: microblaze: PCI early support for noMMU system Signed-off-by: Michal Simek --- arch/microblaze/include/asm/pgalloc.h | 2 +- arch/microblaze/include/asm/pgtable.h | 6 ++++++ arch/microblaze/mm/init.c | 14 +++++++++----- arch/microblaze/pci/pci-common.c | 2 ++ 4 files changed, 18 insertions(+), 6 deletions(-) diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h index 7547f5064560..f44b0d696fe2 100644 --- a/arch/microblaze/include/asm/pgalloc.h +++ b/arch/microblaze/include/asm/pgalloc.h @@ -19,6 +19,7 @@ #include 
#include #include +#include #define PGDIR_ORDER 0 @@ -111,7 +112,6 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { pte_t *pte; - extern int mem_init_done; extern void *early_get_page(void); if (mem_init_done) { pte = (pte_t *)__get_free_page(GFP_KERNEL | diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index adcc3320e52a..dd2bb60651c7 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h @@ -16,6 +16,10 @@ #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ remap_pfn_range(vma, vaddr, pfn, size, prot) +#ifndef __ASSEMBLY__ +extern int mem_init_done; +#endif + #ifndef CONFIG_MMU #define pgd_present(pgd) (1) /* pages are always present on non MMU */ @@ -51,6 +55,8 @@ static inline int pte_file(pte_t pte) { return 0; } #define arch_enter_lazy_cpu_mode() do {} while (0) +#define pgprot_noncached_wc(prot) prot + #else /* CONFIG_MMU */ #include diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index aa6e163411b5..1608e2e1a44a 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -23,6 +23,9 @@ #include #include +/* Use for MMU and noMMU because of PCI generic code */ +int mem_init_done; + #ifndef CONFIG_MMU unsigned int __page_offset; EXPORT_SYMBOL(__page_offset); @@ -30,7 +33,6 @@ EXPORT_SYMBOL(__page_offset); #else DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); -int mem_init_done; static int init_bootmem_done; #endif /* CONFIG_MMU */ @@ -202,9 +204,7 @@ void __init mem_init(void) printk(KERN_INFO "Memory: %luk/%luk available\n", nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10)); -#ifdef CONFIG_MMU mem_init_done = 1; -#endif } #ifndef CONFIG_MMU @@ -216,6 +216,10 @@ int ___range_ok(unsigned long addr, unsigned long size) } EXPORT_SYMBOL(___range_ok); +int page_is_ram(unsigned long pfn) +{ + return __range_ok(pfn, 0); +} #else int page_is_ram(unsigned long pfn) { @@ -344,6 +348,8 @@ void __init *early_get_page(void) return p; } +#endif /* CONFIG_MMU */ + void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask) { if (mem_init_done) @@ -365,5 +371,3 @@ void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask) } return p; } - -#endif /* CONFIG_MMU */ diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index f03f8be2740a..0be34350d733 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -561,11 +561,13 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus, * memory, effectively behaving just like /dev/zero */ if ((offset + size) > hose->isa_mem_size) { +#ifdef CONFIG_MMU printk(KERN_DEBUG "Process %s (pid:%d) mapped non-existing PCI" "legacy memory for 0%04x:%02x\n", current->comm, current->pid, pci_domain_nr(bus), bus->number); +#endif if (vma->vm_flags & VM_SHARED) return shmem_zero_setup(vma); return 0; -- cgit v1.2.3 From b1d70c62fff3e8b6224699801c610c244882685a Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Fri, 22 Jan 2010 10:24:06 +0100 Subject: microblaze: Simplify entry.S - save/restore r3/r4 - ret_from_trap There is possible to save r3/r4 at the beggining of user part before calling handlers and at the end restore it. 
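In C terms the restructuring looks roughly like the sketch below. The struct, the
function names and the flag arguments are placeholders for pt_regs, the assembly labels
and the thread-info tests in entry.S; only the shape of the change is meaningful:

	/* Placeholder for the r3/r4 slots in pt_regs. */
	struct result_regs { long r3, r4; };

	/* Before: each branch saved and reloaded the syscall result
	 * registers around its own helper call. */
	void old_return_path(struct result_regs *regs, int trace, int resched, int sig)
	{
		struct result_regs saved;

		if (trace) {
			saved = *regs;		/* swi r3/r4 */
			/* do_syscall_trace_leave() */
			*regs = saved;		/* lwi r3/r4 */
		}
		if (resched) {
			saved = *regs;
			/* schedule() */
			*regs = saved;
		}
		if (sig) {
			saved = *regs;
			/* do_signal() */
			*regs = saved;
		}
	}

	/* After: save once on entry to the user-mode return path and
	 * restore once just before returning to user state. */
	void new_return_path(struct result_regs *regs, int trace, int resched, int sig)
	{
		struct result_regs saved = *regs;	/* swi r3/r4 once */

		if (trace)   { /* do_syscall_trace_leave() */ }
		if (resched) { /* schedule() */ }
		if (sig)     { /* do_signal() */ }

		*regs = saved;				/* lwi r3/r4 once */
	}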
Signed-off-by: Michal Simek --- arch/microblaze/kernel/entry.S | 78 ++++++++++++++++-------------------------- 1 file changed, 30 insertions(+), 48 deletions(-) diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index 1a6729dde49e..772fe7415f82 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S @@ -305,7 +305,7 @@ C_ENTRY(_user_exception): swi r11, r1, PTO+PT_R1; /* Store user SP. */ addi r11, r0, 1; swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ -2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ +2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* Save away the syscall number. */ swi r12, r1, PTO+PT_R0; tovirt(r1,r1) @@ -322,8 +322,7 @@ C_ENTRY(_user_exception): rtid r11, 0 nop 3: - add r11, r0, CURRENT_TASK /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO /* get thread info */ + lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */ lwi r11, r11, TI_FLAGS /* get flags in thread info */ andi r11, r11, _TIF_WORK_SYSCALL_MASK beqi r11, 4f @@ -382,58 +381,50 @@ C_ENTRY(ret_from_trap): /* See if returning to kernel mode, if so, skip resched &c. */ bnei r11, 2f; + swi r3, r1, PTO + PT_R3 + swi r4, r1, PTO + PT_R4 + /* We're returning to user mode, so check for various conditions that * trigger rescheduling. */ - # FIXME: Restructure all these flag checks. - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ + /* FIXME: Restructure all these flag checks. */ + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_WORK_SYSCALL_MASK beqi r11, 1f - swi r3, r1, PTO + PT_R3 - swi r4, r1, PTO + PT_R4 brlid r15, do_syscall_trace_leave addik r5, r1, PTO + PT_R0 - lwi r3, r1, PTO + PT_R3 - lwi r4, r1, PTO + PT_R4 1: - /* We're returning to user mode, so check for various conditions that * trigger rescheduling. */ - /* Get current task ptr into r11 */ - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ + /* get thread info from current task */ + lwi r11, CURRENT_TASK, TS_THREAD_INFO; lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_NEED_RESCHED; beqi r11, 5f; - swi r3, r1, PTO + PT_R3; /* store syscall result */ - swi r4, r1, PTO + PT_R4; bralid r15, schedule; /* Call scheduler */ nop; /* delay slot */ - lwi r3, r1, PTO + PT_R3; /* restore syscall result */ - lwi r4, r1, PTO + PT_R4; /* Maybe handle a signal */ -5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ +5: /* get thread info from current task*/ + lwi r11, CURRENT_TASK, TS_THREAD_INFO; lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_SIGPENDING; beqi r11, 1f; /* Signals to handle, handle them */ - swi r3, r1, PTO + PT_R3; /* store syscall result */ - swi r4, r1, PTO + PT_R4; la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ add r6, r0, r0; /* Arg 2: sigset_t *oldset */ addi r7, r0, 1; /* Arg 3: int in_syscall */ bralid r15, do_signal; /* Handle any signals */ nop; + +/* Finally, return to user state. */ +1: lwi r3, r1, PTO + PT_R3; /* restore syscall result */ lwi r4, r1, PTO + PT_R4; -/* Finally, return to user state. */ -1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ + swi r0, r0, PER_CPU(KM); /* Now officially in user state. 
*/ add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ VM_OFF; @@ -565,7 +556,7 @@ C_ENTRY(sys_rt_sigreturn_wrapper): swi r11, r1, PTO+PT_R1; /* Store user SP. */ \ addi r11, r0, 1; \ swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode.*/\ -2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\ +2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \ /* Save away the syscall number. */ \ swi r0, r1, PTO+PT_R0; \ tovirt(r1,r1) @@ -673,9 +664,7 @@ C_ENTRY(ret_from_exc): /* We're returning to user mode, so check for various conditions that trigger rescheduling. */ - /* Get current task ptr into r11 */ - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_NEED_RESCHED; beqi r11, 5f; @@ -685,8 +674,7 @@ C_ENTRY(ret_from_exc): nop; /* delay slot */ /* Maybe handle a signal */ -5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ +5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_SIGPENDING; beqi r11, 1f; /* Signals to handle, handle them */ @@ -802,7 +790,7 @@ C_ENTRY(_interrupt): swi r11, r0, TOPHYS(PER_CPU(KM)); 2: - lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); + lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); swi r0, r1, PTO + PT_R0; tovirt(r1,r1) la r5, r1, PTO; @@ -817,8 +805,7 @@ ret_from_irq: lwi r11, r1, PTO + PT_MODE; bnei r11, 2f; - add r11, r0, CURRENT_TASK; - lwi r11, r11, TS_THREAD_INFO; + lwi r11, CURRENT_TASK, TS_THREAD_INFO; lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */ andi r11, r11, _TIF_NEED_RESCHED; beqi r11, 5f @@ -826,8 +813,7 @@ ret_from_irq: nop; /* delay slot */ /* Maybe handle a signal */ -5: add r11, r0, CURRENT_TASK; - lwi r11, r11, TS_THREAD_INFO; /* MS: get thread info */ +5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */ lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_SIGPENDING; beqid r11, no_intr_resched @@ -855,8 +841,7 @@ no_intr_resched: /* MS: Return to kernel state. */ 2: #ifdef CONFIG_PREEMPT - add r11, r0, CURRENT_TASK; - lwi r11, r11, TS_THREAD_INFO; + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get preempt_count from thread info */ lwi r5, r11, TI_PREEMPT_COUNT; bgti r5, restore; @@ -869,8 +854,7 @@ preempt: /* interrupts are off that's why I am calling preempt_chedule_irq */ bralid r15, preempt_schedule_irq nop - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ lwi r5, r11, TI_FLAGS; /* get flags in thread info */ andi r5, r5, _TIF_NEED_RESCHED; bnei r5, preempt /* if non zero jump to resched */ @@ -938,7 +922,7 @@ C_ENTRY(_debug_exception): swi r11, r1, PTO+PT_R1; /* Store user SP. */ addi r11, r0, 1; swi r11, r0, TOPHYS(PER_CPU(KM)); /* Now we're in kernel-mode. */ -2: lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ +2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* Save away the syscall number. 
*/ swi r0, r1, PTO+PT_R0; tovirt(r1,r1) @@ -958,8 +942,7 @@ dbtrap_call: rtbd r11, 0; bnei r11, 2f; /* Get current task ptr into r11 */ - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ + lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_NEED_RESCHED; beqi r11, 5f; @@ -972,8 +955,7 @@ dbtrap_call: rtbd r11, 0; /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */ /* Maybe handle a signal */ -5: add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - lwi r11, r11, TS_THREAD_INFO; /* get thread info */ +5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */ lwi r11, r11, TI_FLAGS; /* get flags in thread info */ andi r11, r11, _TIF_SIGPENDING; beqi r11, 1f; /* Signals to handle, handle them */ @@ -1030,7 +1012,7 @@ DBTRAP_return: /* Make global symbol for debugging */ ENTRY(_switch_to) /* prepare return value */ - addk r3, r0, r31 + addk r3, r0, CURRENT_TASK /* save registers in cpu_context */ /* use r11 and r12, volatile registers, as temp register */ @@ -1074,10 +1056,10 @@ ENTRY(_switch_to) nop swi r12, r11, CC_FSR - /* update r31, the current */ - lwi r31, r6, TI_TASK/* give me pointer to task which will be next */ + /* update r31, the current-give me pointer to task which will be next */ + lwi CURRENT_TASK, r6, TI_TASK /* stored it to current_save too */ - swi r31, r0, PER_CPU(CURRENT_SAVE) + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE) /* get new process' cpu context and restore */ /* give me start where start context of next task */ -- cgit v1.2.3 From 8633bebc63ba5752254925f8b49a19102df1a0ff Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 22 Feb 2010 13:24:43 +0100 Subject: microblaze: Save current task directly Signed-off-by: Michal Simek --- arch/microblaze/kernel/entry.S | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index 772fe7415f82..cc9885d441d5 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S @@ -425,8 +425,7 @@ C_ENTRY(ret_from_trap): lwi r4, r1, PTO + PT_R4; swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ VM_OFF; tophys(r1,r1); RESTORE_REGS; @@ -700,8 +699,7 @@ C_ENTRY(ret_from_exc): /* Finally, return to user state. */ 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ VM_OFF; tophys(r1,r1); @@ -828,8 +826,7 @@ no_intr_resched: /* Disable interrupts, we are now committed to the state restore */ disable_irq swi r0, r0, PER_CPU(KM); /* MS: Now officially in user state. */ - add r11, r0, CURRENT_TASK; - swi r11, r0, PER_CPU(CURRENT_SAVE); + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); VM_OFF; tophys(r1,r1); lwi r3, r1, PTO + PT_R3; /* MS: restore saved r3, r4 registers */ @@ -979,8 +976,7 @@ dbtrap_call: rtbd r11, 0; /* Finally, return to user state. */ 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. 
*/ - add r11, r0, CURRENT_TASK; /* Get current task ptr into r11 */ - swi r11, r0, PER_CPU(CURRENT_SAVE); /* save current */ + swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */ VM_OFF; tophys(r1,r1); -- cgit v1.2.3 From 841d6e8c4e969b2cdd80f7216af34d932c41b1a6 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Fri, 22 Jan 2010 14:28:36 +0100 Subject: microblaze: entry.S use delay slot for return handlers Signed-off-by: Michal Simek --- arch/microblaze/kernel/entry.S | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index cc9885d441d5..c0ede25c5b99 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S @@ -414,10 +414,9 @@ C_ENTRY(ret_from_trap): beqi r11, 1f; /* Signals to handle, handle them */ la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ - add r6, r0, r0; /* Arg 2: sigset_t *oldset */ addi r7, r0, 1; /* Arg 3: int in_syscall */ bralid r15, do_signal; /* Handle any signals */ - nop; + add r6, r0, r0; /* Arg 2: sigset_t *oldset */ /* Finally, return to user state. */ 1: @@ -692,10 +691,9 @@ C_ENTRY(ret_from_exc): * store return registers separately because this macros is use * for others exceptions */ la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ - add r6, r0, r0; /* Arg 2: sigset_t *oldset */ addi r7, r0, 0; /* Arg 3: int in_syscall */ bralid r15, do_signal; /* Handle any signals */ - nop; + add r6, r0, r0; /* Arg 2: sigset_t *oldset */ /* Finally, return to user state. */ 1: swi r0, r0, PER_CPU(KM); /* Now officially in user state. */ @@ -968,10 +966,9 @@ dbtrap_call: rtbd r11, 0; (in a possibly modified form) after do_signal returns. */ la r5, r1, PTO; /* Arg 1: struct pt_regs *regs */ - add r6, r0, r0; /* Arg 2: sigset_t *oldset */ addi r7, r0, 0; /* Arg 3: int in_syscall */ bralid r15, do_signal; /* Handle any signals */ - nop; + add r6, r0, r0; /* Arg 2: sigset_t *oldset */ /* Finally, return to user state. */ -- cgit v1.2.3 From bb6fbc4548b9ae7ebbd06ef72f00229df259d217 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 11 Mar 2010 09:19:35 -0500 Subject: NFS: Avoid a deadlock in nfs_release_page J.R. Okajima reports the following deadlock: INFO: task kswapd0:305 blocked for more than 120 seconds. "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. kswapd0 D 0000000000000001 0 305 2 0x00000000 ffff88001f21d4f0 0000000000000046 ffff88001fdea680 ffff88001f21c000 ffff88001f21dfd8 ffff88001f21c000 ffff88001f21dfd8 ffff88001f21dfd8 ffff88001fdea040 0000000000014c00 0000000000000001 ffff88001fdea040 Call Trace: [] io_schedule+0x4d/0x70 [] sync_page+0x65/0xa0 [] __wait_on_bit_lock+0x52/0xb0 [] ? sync_page+0x0/0xa0 [] __lock_page+0x64/0x70 [] ? wake_bit_function+0x0/0x40 [] truncate_inode_pages_range+0x344/0x4a0 [] truncate_inode_pages+0x10/0x20 [] generic_delete_inode+0x15e/0x190 [] generic_drop_inode+0x5d/0x80 [] iput+0x78/0x80 [] nfs_dentry_iput+0x38/0x50 [] dentry_iput+0x84/0x110 [] d_kill+0x2e/0x60 [] dput+0x7a/0x170 [] path_put+0x15/0x40 [] __put_nfs_open_context+0xa4/0xb0 [] ? 
nfs_free_request+0x0/0x50 [] put_nfs_open_context+0xb/0x10 [] nfs_free_request+0x29/0x50 [] kref_put+0x8e/0xe0 [] nfs_release_request+0x14/0x20 [] nfs_find_and_lock_request+0x89/0xa0 [] nfs_wb_page+0x80/0x110 [] nfs_release_page+0x70/0x90 [] try_to_release_page+0x5e/0x80 [] shrink_page_list+0x638/0x860 [] shrink_zone+0x63e/0xc40 We can fix this by making the call to put_nfs_open_context() happen when we actually remove the write request from the inode (which is done by the nfsiod thread in this case). Signed-off-by: Trond Myklebust Cc: stable@kernel.org --- fs/nfs/pagelist.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index a12c45b65dd4..29d9d36cd5f4 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -112,12 +112,10 @@ void nfs_unlock_request(struct nfs_page *req) */ int nfs_set_page_tag_locked(struct nfs_page *req) { - struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode); - if (!nfs_lock_request_dontget(req)) return 0; if (req->wb_page != NULL) - radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); + radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); return 1; } @@ -126,10 +124,10 @@ int nfs_set_page_tag_locked(struct nfs_page *req) */ void nfs_clear_page_tag_locked(struct nfs_page *req) { - struct inode *inode = req->wb_context->path.dentry->d_inode; - struct nfs_inode *nfsi = NFS_I(inode); - if (req->wb_page != NULL) { + struct inode *inode = req->wb_context->path.dentry->d_inode; + struct nfs_inode *nfsi = NFS_I(inode); + spin_lock(&inode->i_lock); radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); nfs_unlock_request(req); @@ -142,16 +140,22 @@ void nfs_clear_page_tag_locked(struct nfs_page *req) * nfs_clear_request - Free up all resources allocated to the request * @req: * - * Release page resources associated with a write request after it - * has completed. + * Release page and open context resources associated with a read/write + * request after it has completed. */ void nfs_clear_request(struct nfs_page *req) { struct page *page = req->wb_page; + struct nfs_open_context *ctx = req->wb_context; + if (page != NULL) { page_cache_release(page); req->wb_page = NULL; } + if (ctx != NULL) { + put_nfs_open_context(ctx); + req->wb_context = NULL; + } } @@ -165,9 +169,8 @@ static void nfs_free_request(struct kref *kref) { struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); - /* Release struct file or cached credential */ + /* Release struct file and open context */ nfs_clear_request(req); - put_nfs_open_context(req->wb_context); nfs_page_free(req); } -- cgit v1.2.3 From 85cfabbcd10f8d112feee6e2ec64ee78033b6d3c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 11 Mar 2010 13:06:56 +0100 Subject: perf, ppc: Fix compile error due to new cpu notifiers Fix: arch/powerpc/kernel/perf_event.c:1334: error: 'power_pmu_notifier' undeclared (first use in this function) arch/powerpc/kernel/perf_event.c:1334: error: (Each undeclared identifier is reported only once arch/powerpc/kernel/perf_event.c:1334: error: for each function it appears in.) arch/powerpc/kernel/perf_event.c:1334: error: implicit declaration of function 'power_pmu_notifier' arch/powerpc/kernel/perf_event.c:1334: error: implicit declaration of function 'register_cpu_notifier' Due to commit 3f6da390 (perf: Rework and fix the arch CPU-hotplug hooks). 
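A minimal sketch of the pattern being repaired (hedged: the registration
shown here is an assumption for illustration, not lifted from the tree).
The errors quoted above are what gcc emits when the hotplug callback is
defined under one name but referenced under another, and <linux/cpu.h> is
not in scope for register_cpu_notifier():

	#include <linux/cpu.h>
	#include <linux/init.h>
	#include <linux/notifier.h>

	/* callback name and registered symbol must agree */
	static int power_pmu_notifier(struct notifier_block *self,
				      unsigned long action, void *hcpu)
	{
		return NOTIFY_OK;
	}

	static struct notifier_block power_pmu_nb = {
		.notifier_call = power_pmu_notifier,
	};

	static int __init power_pmu_hotplug_init(void)
	{
		register_cpu_notifier(&power_pmu_nb);
		return 0;
	}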
Signed-off-by: Peter Zijlstra LKML-Reference: Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/perf_event.c | 2 +- include/linux/perf_event.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index fbe101d7505d..08460a2e9f41 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -1298,7 +1298,7 @@ static void power_pmu_setup(int cpu) } static int __cpuinit -power_pmu_notify(struct notifier_block *self, unsigned long action, void *hcpu) +power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (long)hcpu; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 70cffd052c04..95477038a72a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -453,6 +453,7 @@ enum perf_callchain_context { #include #include #include +#include #include #define PERF_MAX_STACK_DEPTH 255 -- cgit v1.2.3 From 45e16a6834b6af098702e5ea6c9a40de42ff77d8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 11 Mar 2010 13:40:30 +0100 Subject: perf, x86: Fix hw_perf_enable() event assignment What happens is that we schedule badly like: <...>-1987 [019] 280.252808: x86_pmu_start: event-46/1300c0: idx: 0 <...>-1987 [019] 280.252811: x86_pmu_start: event-47/1300c0: idx: 1 <...>-1987 [019] 280.252812: x86_pmu_start: event-48/1300c0: idx: 2 <...>-1987 [019] 280.252813: x86_pmu_start: event-49/1300c0: idx: 3 <...>-1987 [019] 280.252814: x86_pmu_start: event-50/1300c0: idx: 32 <...>-1987 [019] 280.252825: x86_pmu_stop: event-46/1300c0: idx: 0 <...>-1987 [019] 280.252826: x86_pmu_stop: event-47/1300c0: idx: 1 <...>-1987 [019] 280.252827: x86_pmu_stop: event-48/1300c0: idx: 2 <...>-1987 [019] 280.252828: x86_pmu_stop: event-49/1300c0: idx: 3 <...>-1987 [019] 280.252829: x86_pmu_stop: event-50/1300c0: idx: 32 <...>-1987 [019] 280.252834: x86_pmu_start: event-47/1300c0: idx: 1 <...>-1987 [019] 280.252834: x86_pmu_start: event-48/1300c0: idx: 2 <...>-1987 [019] 280.252835: x86_pmu_start: event-49/1300c0: idx: 3 <...>-1987 [019] 280.252836: x86_pmu_start: event-50/1300c0: idx: 32 <...>-1987 [019] 280.252837: x86_pmu_start: event-51/1300c0: idx: 32 *FAIL* This happens because we only iterate the n_running events in the first pass, and reset their index to -1 if they don't match to force a re-assignment. Now, in our RR example, n_running == 0 because we fully unscheduled, so event-50 will retain its idx==32, even though in scheduling it will have gotten idx=0, and we don't trigger the re-assign path. The easiest way to fix this is the below patch, which simply validates the full assignment in the second pass. 
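For context, the check that the second pass now applies to every event looks
roughly like this (a sketch of the assumed shape of the helper, not the
verbatim kernel code): an assignment only counts as unchanged if the event
keeps the counter placement it was given by the last schedule.

	/* sketch: "did event i keep its placement from the previous schedule?" */
	static int match_prev_assignment(struct hw_perf_event *hwc,
					 struct cpu_hw_events *cpuc, int i)
	{
		return hwc->idx == cpuc->assign[i] &&
		       hwc->last_cpu == cpuc->cpu &&
		       hwc->last_tag == cpuc->tags[i];
	}

Validating this for all n_events, instead of only the first n_running, means
a stale idx such as the 32 in the trace above can no longer slip through to
x86_pmu_start().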
Reported-by: Stephane Eranian Signed-off-by: Peter Zijlstra LKML-Reference: <1268311069.5037.31.camel@laptop> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index c6bde7d7afdc..5fb490c6ee5c 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -811,7 +811,6 @@ void hw_perf_enable(void) * step2: reprogram moved events into new counters */ for (i = 0; i < n_running; i++) { - event = cpuc->event_list[i]; hwc = &event->hw; @@ -826,21 +825,16 @@ void hw_perf_enable(void) continue; x86_pmu_stop(event); - - hwc->idx = -1; } for (i = 0; i < cpuc->n_events; i++) { - event = cpuc->event_list[i]; hwc = &event->hw; - if (i < n_running && - match_prev_assignment(hwc, cpuc, i)) - continue; - - if (hwc->idx == -1) + if (!match_prev_assignment(hwc, cpuc, i)) x86_assign_hw_event(event, cpuc, i); + else if (i < n_running) + continue; x86_pmu_start(event); } -- cgit v1.2.3 From 639fe4b12f92b54c9c3b38c82cdafaa38cfd3e63 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Thu, 11 Mar 2010 15:30:35 +0800 Subject: perf: export perf_trace_regs and perf_arch_fetch_caller_regs Export perf_trace_regs and perf_arch_fetch_caller_regs since module will use these. Signed-off-by: Xiao Guangrong [ use EXPORT_PER_CPU_SYMBOL_GPL() ] Signed-off-by: Peter Zijlstra LKML-Reference: <4B989C1B.2090407@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 1 + kernel/trace/trace_event_perf.c | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 5fb490c6ee5c..7645faea8e85 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1713,3 +1713,4 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski regs->cs = __KERNEL_CS; local_save_flags(regs->flags); } +EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index f315b12a41d8..0709e4f75114 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -10,6 +10,7 @@ #include "trace.h" DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); +EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs); static char *perf_trace_buf; static char *perf_trace_buf_nmi; -- cgit v1.2.3 From 6f69a1815a93722b360a1439934856e766509002 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Thu, 4 Mar 2010 09:45:53 +0200 Subject: omap2/3/4: ehci: avoid compiler error with touchbook the early_param() call in board-omap3touchbook.c expands to: static const char __setup_str_early_touchbook_revision[] __section(.init.rodata) _aligned(1) = tbr; [...] and we have a non-const variable being added to the same section: static struct ehci_hcd_omap_platform_data ehci_pdata __section(.init.rodata); because of that, gcc generates a section type conflict which can (and actually should) be avoided by marking const every variable marked with __initconst. This patch fixes that for the ehci_hdc_omap_platform_data. 
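A minimal reproduction of the conflict, with made-up names (illustration
only): __initconst places objects in .init.rodata, so anything carrying the
annotation must itself be const; otherwise gcc sees a read-only and a
writable object claiming the same section and fails with "section type
conflict".

	#include <linux/init.h>

	static const char banner[] __initconst = "ok";	/* const: fine */

	static struct example_pdata {
		int mode;
	} pdata __initconst = {				/* not const: conflict */
		.mode = 1,
	};

Marking the data const, as this patch does for the ehci platform data, keeps
both objects read-only and the section attributes consistent.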
Signed-off-by: Felipe Balbi Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/board-3430sdp.c | 2 +- arch/arm/mach-omap2/board-3630sdp.c | 2 +- arch/arm/mach-omap2/board-am3517evm.c | 2 +- arch/arm/mach-omap2/board-cm-t35.c | 2 +- arch/arm/mach-omap2/board-devkit8000.c | 2 +- arch/arm/mach-omap2/board-igep0020.c | 2 +- arch/arm/mach-omap2/board-omap3beagle.c | 2 +- arch/arm/mach-omap2/board-omap3evm.c | 2 +- arch/arm/mach-omap2/board-omap3pandora.c | 2 +- arch/arm/mach-omap2/board-omap3touchbook.c | 2 +- arch/arm/mach-omap2/board-overo.c | 2 +- arch/arm/mach-omap2/board-zoom3.c | 2 +- arch/arm/mach-omap2/usb-ehci.c | 6 +++--- arch/arm/plat-omap/include/plat/usb.h | 2 +- 14 files changed, 16 insertions(+), 16 deletions(-) diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c index a101029ceb6f..5822bcf7b15f 100644 --- a/arch/arm/mach-omap2/board-3430sdp.c +++ b/arch/arm/mach-omap2/board-3430sdp.c @@ -648,7 +648,7 @@ static void enable_board_wakeup_source(void) OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP); } -static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c index 4386d2b4a785..a0a2a113465c 100755 --- a/arch/arm/mach-omap2/board-3630sdp.c +++ b/arch/arm/mach-omap2/board-3630sdp.c @@ -54,7 +54,7 @@ static void enable_board_wakeup_source(void) OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP); } -static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c index 70c18614773c..6ae880585d54 100644 --- a/arch/arm/mach-omap2/board-am3517evm.c +++ b/arch/arm/mach-omap2/board-am3517evm.c @@ -273,7 +273,7 @@ static void __init am3517_evm_init_irq(void) omap_gpio_init(); } -static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = { +static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c index afa77caaff4d..2de4f79f03a0 100644 --- a/arch/arm/mach-omap2/board-cm-t35.c +++ b/arch/arm/mach-omap2/board-cm-t35.c @@ -612,7 +612,7 @@ static struct omap2_hsmmc_info mmc[] = { {} /* Terminator */ }; -static struct ehci_hcd_omap_platform_data ehci_pdata = { +static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = { .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c index 371019054b49..5bfc13b3176c 100644 --- a/arch/arm/mach-omap2/board-devkit8000.c +++ b/arch/arm/mach-omap2/board-devkit8000.c @@ -636,7 +636,7 @@ static struct omap_musb_board_data musb_board_data = { .power = 100, }; -static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, diff --git a/arch/arm/mach-omap2/board-igep0020.c 
b/arch/arm/mach-omap2/board-igep0020.c index 9958987a3d0a..4f1accf7f7f3 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c @@ -442,7 +442,7 @@ static struct omap_musb_board_data musb_board_data = { .power = 100, }; -static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index 6eb77e1f7c82..962d377970e9 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c @@ -410,7 +410,7 @@ static void __init omap3beagle_flash_init(void) } } -static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c index d6bc88c426b5..017bb2f4f7d2 100644 --- a/arch/arm/mach-omap2/board-omap3evm.c +++ b/arch/arm/mach-omap2/board-omap3evm.c @@ -635,7 +635,7 @@ static struct platform_device *omap3_evm_devices[] __initdata = { &omap3_evm_dss_device, }; -static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static struct ehci_hcd_omap_platform_data ehci_pdata __initdata = { .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c index 4827f4658df3..51a5315519c4 100644 --- a/arch/arm/mach-omap2/board-omap3pandora.c +++ b/arch/arm/mach-omap2/board-omap3pandora.c @@ -537,7 +537,7 @@ static struct platform_device *omap3pandora_devices[] __initdata = { &pandora_dss_device, }; -static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, .port_mode[1] = EHCI_HCD_OMAP_MODE_UNKNOWN, diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c index 2a5bf5cea883..2504d41f923e 100644 --- a/arch/arm/mach-omap2/board-omap3touchbook.c +++ b/arch/arm/mach-omap2/board-omap3touchbook.c @@ -493,7 +493,7 @@ static void __init omap3touchbook_flash_init(void) } } -static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY, .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c index 50872a42bec7..8848c7c5ce48 100644 --- a/arch/arm/mach-omap2/board-overo.c +++ b/arch/arm/mach-omap2/board-overo.c @@ -394,7 +394,7 @@ static struct platform_device *overo_devices[] __initdata = { &overo_lcd_device, }; -static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, diff --git a/arch/arm/mach-omap2/board-zoom3.c b/arch/arm/mach-omap2/board-zoom3.c index d3e3cd5170d1..cd3e40cf3ac1 100644 --- a/arch/arm/mach-omap2/board-zoom3.c +++ b/arch/arm/mach-omap2/board-zoom3.c @@ -52,7 +52,7 @@ static struct omap_board_mux 
board_mux[] __initdata = { #define board_mux NULL #endif -static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { +static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = { .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN, .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY, .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN, diff --git a/arch/arm/mach-omap2/usb-ehci.c b/arch/arm/mach-omap2/usb-ehci.c index f1df873d59db..ee9f548d5d81 100644 --- a/arch/arm/mach-omap2/usb-ehci.c +++ b/arch/arm/mach-omap2/usb-ehci.c @@ -70,7 +70,7 @@ static struct platform_device ehci_device = { /* * setup_ehci_io_mux - initialize IO pad mux for USBHOST */ -static void setup_ehci_io_mux(enum ehci_hcd_omap_mode *port_mode) +static void setup_ehci_io_mux(const enum ehci_hcd_omap_mode *port_mode) { switch (port_mode[0]) { case EHCI_HCD_OMAP_MODE_PHY: @@ -213,7 +213,7 @@ static void setup_ehci_io_mux(enum ehci_hcd_omap_mode *port_mode) return; } -void __init usb_ehci_init(struct ehci_hcd_omap_platform_data *pdata) +void __init usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata) { platform_device_add_data(&ehci_device, pdata, sizeof(*pdata)); @@ -229,7 +229,7 @@ void __init usb_ehci_init(struct ehci_hcd_omap_platform_data *pdata) #else -void __init usb_ehci_init(struct ehci_hcd_omap_platform_data *pdata) +void __init usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata) { } diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h index 288e29e1c06f..568578db93b6 100644 --- a/arch/arm/plat-omap/include/plat/usb.h +++ b/arch/arm/plat-omap/include/plat/usb.h @@ -53,7 +53,7 @@ enum musb_interface {MUSB_INTERFACE_ULPI, MUSB_INTERFACE_UTMI}; extern void usb_musb_init(struct omap_musb_board_data *board_data); -extern void usb_ehci_init(struct ehci_hcd_omap_platform_data *pdata); +extern void usb_ehci_init(const struct ehci_hcd_omap_platform_data *pdata); #endif -- cgit v1.2.3 From d660f9a26ef81c3bbced92514ffbe82e1b882ee1 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Thu, 11 Mar 2010 07:33:46 +0000 Subject: omap4: Fix build break by moving omap_smc1 into a separate .S This patch moves omap_smc1 function to a seperate omap44xx-smc.S file and sets compile flags as -Wa,-march=armv7-a. 
This fix was suggested by Tony Lindgren Signed-off-by: Santosh Shilimkar [tony@atomide.com: otherwise multi-omap build with V6 and V7 breaks] Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/Makefile | 3 +++ arch/arm/mach-omap2/board-4430sdp.c | 26 +------------------------- arch/arm/mach-omap2/omap44xx-smc.S | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 36 insertions(+), 25 deletions(-) create mode 100644 arch/arm/mach-omap2/omap44xx-smc.S diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 2069fb33baaa..4b9fc57770db 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -22,6 +22,9 @@ obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o # SMP support ONLY available for OMAP4 obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o obj-$(CONFIG_LOCAL_TIMERS) += timer-mpu.o +obj-$(CONFIG_ARCH_OMAP4) += omap44xx-smc.o + +AFLAGS_omap44xx-smc.o :=-Wa,-march=armv7-a # Functions loaded to SRAM obj-$(CONFIG_ARCH_OMAP2420) += sram242x.o diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c index 180ac112e527..b88f28c5814b 100644 --- a/arch/arm/mach-omap2/board-4430sdp.c +++ b/arch/arm/mach-omap2/board-4430sdp.c @@ -50,33 +50,9 @@ static struct omap_board_config_kernel sdp4430_config[] __initdata = { }; #ifdef CONFIG_CACHE_L2X0 -noinline void omap_smc1(u32 fn, u32 arg) -{ - register u32 r12 asm("r12") = fn; - register u32 r0 asm("r0") = arg; - - /* This is common routine cache secure monitor API used to - * modify the PL310 secure registers. - * r0 contains the value to be modified and "r12" contains - * the monitor API number. It uses few CPU registers - * internally and hence they need be backed up including - * link register "lr". - * Explicitly save r11 and r12 the compiler generated code - * won't save it. - */ - asm volatile( - "stmfd r13!, {r11,r12}\n" - "dsb\n" - "smc\n" - "ldmfd r13!, {r11,r12}\n" - : "+r" (r0), "+r" (r12) - : - : "r4", "r5", "r10", "lr", "cc"); -} -EXPORT_SYMBOL(omap_smc1); - static int __init omap_l2_cache_init(void) { + extern void omap_smc1(u32 fn, u32 arg); void __iomem *l2cache_base; /* To avoid code running on other OMAPs in diff --git a/arch/arm/mach-omap2/omap44xx-smc.S b/arch/arm/mach-omap2/omap44xx-smc.S new file mode 100644 index 000000000000..89bb2b141473 --- /dev/null +++ b/arch/arm/mach-omap2/omap44xx-smc.S @@ -0,0 +1,32 @@ +/* + * OMAP44xx secure APIs file. + * + * Copyright (C) 2010 Texas Instruments, Inc. + * Written by Santosh Shilimkar + * + * + * This program is free software,you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include + +/* + * This is common routine to manage secure monitor API + * used to modify the PL310 secure registers. + * 'r0' contains the value to be modified and 'r12' contains + * the monitor API number. It uses few CPU registers + * internally and hence they need be backed up including + * link register "lr". + * Function signature : void omap_smc1(u32 fn, u32 arg) + */ + +ENTRY(omap_smc1) + stmfd sp!, {r2-r12, lr} + mov r12, r0 + mov r0, r1 + dsb + smc + ldmfd sp!, {r2-r12, pc} +END(omap_smc1) -- cgit v1.2.3 From 97b9ad1633ed3724e0563d250850763d20275da7 Mon Sep 17 00:00:00 2001 From: Francisco Alecrim Date: Wed, 10 Mar 2010 18:52:24 -0800 Subject: omap2: add USB initialization for tusb6010 Based on Kalle's and Tony's patches. Some variables re-organized and unused code removed. 
Signed-off-by: Kalle Valo Signed-off-by: Francisco Alecrim [tony@atomide.com: this is needed to fix the related tusb6010 DMA API changes] Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/board-n8x0.c | 98 ++++++++++++++++++++++++++++++++++++ arch/arm/mach-omap2/clock2420_data.c | 1 + 2 files changed, 99 insertions(+) diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c index 4cab0522d7ce..da9bcb898991 100644 --- a/arch/arm/mach-omap2/board-n8x0.c +++ b/arch/arm/mach-omap2/board-n8x0.c @@ -37,6 +37,103 @@ static int slot1_cover_open; static int slot2_cover_open; static struct device *mmc_device; +#define TUSB6010_ASYNC_CS 1 +#define TUSB6010_SYNC_CS 4 +#define TUSB6010_GPIO_INT 58 +#define TUSB6010_GPIO_ENABLE 0 +#define TUSB6010_DMACHAN 0x3f + +#if defined(CONFIG_USB_TUSB6010) || \ + defined(CONFIG_USB_TUSB6010_MODULE) +/* + * Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and + * 1.5 V voltage regulators of PM companion chip. Companion chip will then + * provide then PGOOD signal to TUSB6010 which will release it from reset. + */ +static int tusb_set_power(int state) +{ + int i, retval = 0; + + if (state) { + gpio_set_value(TUSB6010_GPIO_ENABLE, 1); + msleep(1); + + /* Wait until TUSB6010 pulls INT pin down */ + i = 100; + while (i && gpio_get_value(TUSB6010_GPIO_INT)) { + msleep(1); + i--; + } + + if (!i) { + printk(KERN_ERR "tusb: powerup failed\n"); + retval = -ENODEV; + } + } else { + gpio_set_value(TUSB6010_GPIO_ENABLE, 0); + msleep(10); + } + + return retval; +} + +static struct musb_hdrc_config musb_config = { + .multipoint = 1, + .dyn_fifo = 1, + .num_eps = 16, + .ram_bits = 12, +}; + +static struct musb_hdrc_platform_data tusb_data = { +#if defined(CONFIG_USB_MUSB_OTG) + .mode = MUSB_OTG, +#elif defined(CONFIG_USB_MUSB_PERIPHERAL) + .mode = MUSB_PERIPHERAL, +#else /* defined(CONFIG_USB_MUSB_HOST) */ + .mode = MUSB_HOST, +#endif + .set_power = tusb_set_power, + .min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */ + .power = 100, /* Max 100 mA VBUS for host mode */ + .config = &musb_config, +}; + +static void __init n8x0_usb_init(void) +{ + int ret = 0; + static char announce[] __initdata = KERN_INFO "TUSB 6010\n"; + + /* PM companion chip power control pin */ + ret = gpio_request(TUSB6010_GPIO_ENABLE, "TUSB6010 enable"); + if (ret != 0) { + printk(KERN_ERR "Could not get TUSB power GPIO%i\n", + TUSB6010_GPIO_ENABLE); + return; + } + gpio_direction_output(TUSB6010_GPIO_ENABLE, 0); + + tusb_set_power(0); + + ret = tusb6010_setup_interface(&tusb_data, TUSB6010_REFCLK_19, 2, + TUSB6010_ASYNC_CS, TUSB6010_SYNC_CS, + TUSB6010_GPIO_INT, TUSB6010_DMACHAN); + if (ret != 0) + goto err; + + printk(announce); + + return; + +err: + gpio_free(TUSB6010_GPIO_ENABLE); +} +#else + +static void __init n8x0_usb_init(void) {} + +#endif /*CONFIG_USB_TUSB6010 */ + + static struct omap2_mcspi_device_config p54spi_mcspi_config = { .turbo_mode = 0, .single_channel = 1, @@ -562,6 +659,7 @@ static void __init n8x0_init_machine(void) n8x0_menelaus_init(); n8x0_onenand_init(); n8x0_mmc_init(); + n8x0_usb_init(); } MACHINE_START(NOKIA_N800, "Nokia N800") diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c index f12af95ead45..d932b142d0b6 100644 --- a/arch/arm/mach-omap2/clock2420_data.c +++ b/arch/arm/mach-omap2/clock2420_data.c @@ -1841,6 +1841,7 @@ static struct omap_clk omap2420_clks[] = { CLK(NULL, "aes_ick", &aes_ick, CK_242X), CLK(NULL, "pka_ick", &pka_ick, CK_242X), CLK(NULL, "usb_fck", &usb_fck, CK_242X), 
+ CLK("musb_hdrc", "fck", &osc_ck, CK_242X), }; /* -- cgit v1.2.3 From 79cf5bff25bc30c31fd7e4782c36947ec4bace7f Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 10 Mar 2010 18:55:31 -0800 Subject: omap2: Update n8x0 defconfig to test multi-omap and DMA api changes Recent DMA API changes broke compile for tusb6010. While testing the fixes for tusb6010, I had to update the n8x0 defconfig quite a bit. Might as well merge it while at it to make it more usable as we're using this to test the multi-omap booting between V6 and V7 ARMs. Also, anybody using n8x0 with a current kernel will most likely want to mount root on the MMC instead of the onenand to keep the Maemo install intact. Enable I2C, REGULATOR, MMC, MFD, PM, and USB. Also change the root to /dev/mmcblk0p2 instead of the onenand. Signed-off-by: Tony Lindgren --- arch/arm/configs/n8x0_defconfig | 160 +++++++++++++++++++++++++++++++++++----- 1 file changed, 142 insertions(+), 18 deletions(-) diff --git a/arch/arm/configs/n8x0_defconfig b/arch/arm/configs/n8x0_defconfig index e6f667c5e58a..216ad00948af 100644 --- a/arch/arm/configs/n8x0_defconfig +++ b/arch/arm/configs/n8x0_defconfig @@ -191,6 +191,7 @@ CONFIG_ARCH_OMAP=y # CONFIG_ARCH_OMAP_OTG=y # CONFIG_ARCH_OMAP1 is not set +CONFIG_ARCH_OMAP2PLUS=y CONFIG_ARCH_OMAP2=y # CONFIG_ARCH_OMAP3 is not set # CONFIG_ARCH_OMAP4 is not set @@ -198,8 +199,6 @@ CONFIG_ARCH_OMAP2=y # # OMAP Feature Selections # -# CONFIG_OMAP_DEBUG_POWERDOMAIN is not set -# CONFIG_OMAP_DEBUG_CLOCKDOMAIN is not set CONFIG_OMAP_RESET_CLOCKS=y # CONFIG_OMAP_MUX is not set # CONFIG_OMAP_MCBSP is not set @@ -208,15 +207,13 @@ CONFIG_OMAP_MBOX_FWK=y CONFIG_OMAP_32K_TIMER=y CONFIG_OMAP_32K_TIMER_HZ=128 CONFIG_OMAP_DM_TIMER=y -# CONFIG_OMAP_LL_DEBUG_UART1 is not set -# CONFIG_OMAP_LL_DEBUG_UART2 is not set -CONFIG_OMAP_LL_DEBUG_UART3=y +# CONFIG_OMAP_PM_NONE is not set +CONFIG_OMAP_PM_NOOP=y # CONFIG_MACH_OMAP_GENERIC is not set # # OMAP Core Type # -CONFIG_ARCH_OMAP24XX=y CONFIG_ARCH_OMAP2420=y # CONFIG_ARCH_OMAP2430 is not set @@ -227,6 +224,9 @@ CONFIG_MACH_OMAP2_TUSB6010=y # CONFIG_MACH_OMAP_H4 is not set # CONFIG_MACH_OMAP_APOLLON is not set # CONFIG_MACH_OMAP_2430SDP is not set +CONFIG_MACH_NOKIA_N800=y +CONFIG_MACH_NOKIA_N810=y +CONFIG_MACH_NOKIA_N810_WIMAX=y CONFIG_MACH_NOKIA_N8X0=y # @@ -303,7 +303,7 @@ CONFIG_ALIGNMENT_TRAP=y CONFIG_ZBOOT_ROM_TEXT=0x10C08000 CONFIG_ZBOOT_ROM_BSS=0x10200000 # CONFIG_ZBOOT_ROM is not set -CONFIG_CMDLINE="root=1f03 rootfstype=jffs2 console=ttyS2,115200n8" +CONFIG_CMDLINE="root=/dev/mmcblk0p2 console=ttyS2,115200n8 debug earlyprintk rootwait" # CONFIG_XIP_KERNEL is not set # CONFIG_KEXEC is not set @@ -337,7 +337,14 @@ CONFIG_HAVE_AOUT=y # # Power management options # -# CONFIG_PM is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_SLEEP=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_APM_EMULATION is not set +CONFIG_PM_RUNTIME=y +CONFIG_PM_OPS=y CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_NET=y @@ -617,7 +624,55 @@ CONFIG_UNIX98_PTYS=y # CONFIG_R3964 is not set # CONFIG_RAW_DRIVER is not set # CONFIG_TCG_TPM is not set -# CONFIG_I2C is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +# CONFIG_I2C_COMPAT is not set +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_HELPER_AUTO is not set +# CONFIG_I2C_SMBUS is not set + +# +# I2C Algorithms +# +# CONFIG_I2C_ALGOBIT is not set +# CONFIG_I2C_ALGOPCF is not set +# CONFIG_I2C_ALGOPCA is not set + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE 
is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_OMAP=y +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_STUB is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set CONFIG_SPI=y # CONFIG_SPI_DEBUG is not set CONFIG_SPI_MASTER=y @@ -673,15 +728,44 @@ CONFIG_SSB_POSSIBLE=y # Multifunction device drivers # # CONFIG_MFD_CORE is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_SM501 is not set # CONFIG_MFD_ASIC3 is not set # CONFIG_HTC_EGPIO is not set # CONFIG_HTC_PASIC3 is not set +# CONFIG_HTC_I2CPLD is not set +# CONFIG_TPS65010 is not set +CONFIG_MENELAUS=y +# CONFIG_TWL4030_CORE is not set # CONFIG_MFD_TMIO is not set # CONFIG_MFD_T7L66XB is not set # CONFIG_MFD_TC6387XB is not set # CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_MC13783 is not set +# CONFIG_AB3100_CORE is not set # CONFIG_EZX_PCAP is not set +# CONFIG_AB4500_CORE is not set +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_DUMMY is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set # CONFIG_MEDIA_SUPPORT is not set # @@ -718,7 +802,10 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_DEVICEFS=y CONFIG_USB_DEVICE_CLASS=y # CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_OTG is not set +CONFIG_USB_SUSPEND=y +CONFIG_USB_OTG=y +# CONFIG_USB_OTG_WHITELIST is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set # CONFIG_USB_MON is not set # CONFIG_USB_WUSB is not set # CONFIG_USB_WUSB_CBAF is not set @@ -737,9 +824,10 @@ CONFIG_USB_DEVICE_CLASS=y CONFIG_USB_MUSB_HDRC=y CONFIG_USB_TUSB6010=y # CONFIG_USB_MUSB_HOST is not set -CONFIG_USB_MUSB_PERIPHERAL=y -# CONFIG_USB_MUSB_OTG is not set +# CONFIG_USB_MUSB_PERIPHERAL is not set +CONFIG_USB_MUSB_OTG=y CONFIG_USB_GADGET_MUSB_HDRC=y +CONFIG_USB_MUSB_HDRC_HCD=y # CONFIG_MUSB_PIO_ONLY is not set # CONFIG_USB_INVENTRA_DMA is not set # CONFIG_USB_TI_CPPI_DMA is not set @@ -824,44 +912,77 @@ CONFIG_USB_GADGET_DUALSPEED=y # CONFIG_USB_ZERO is not set # CONFIG_USB_AUDIO is not set CONFIG_USB_ETH=y -# CONFIG_USB_ETH_RNDIS is not set +CONFIG_USB_ETH_RNDIS=y +CONFIG_USB_ETH_EEM=y # CONFIG_USB_GADGETFS is not set # CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_MASS_STORAGE is not set # CONFIG_USB_G_SERIAL is not set # CONFIG_USB_MIDI_GADGET is not set # CONFIG_USB_G_PRINTER is not set # CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_NOKIA is not set +# CONFIG_USB_G_MULTI is not set # # OTG and related infrastructure # CONFIG_USB_OTG_UTILS=y # CONFIG_USB_GPIO_VBUS 
is not set +# CONFIG_ISP1301_OMAP is not set +# CONFIG_USB_ULPI is not set CONFIG_NOP_USB_XCEIV=y -# CONFIG_MMC is not set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +# CONFIG_MMC_UNSAFE_RESUME is not set + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_BOUNCE=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_SDHCI is not set +CONFIG_MMC_OMAP=y +# CONFIG_MMC_SPI is not set # CONFIG_MEMSTICK is not set -# CONFIG_ACCESSIBILITY is not set # CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set CONFIG_RTC_LIB=y # CONFIG_RTC_CLASS is not set # CONFIG_DMADEVICES is not set # CONFIG_AUXDISPLAY is not set -# CONFIG_REGULATOR is not set # CONFIG_UIO is not set + +# +# TI VLYNQ +# # CONFIG_STAGING is not set # # File systems # # CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT3_FS_XATTR=y +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set # CONFIG_EXT4_FS is not set +CONFIG_JBD=y +CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set # CONFIG_FS_POSIX_ACL is not set # CONFIG_XFS_FS is not set # CONFIG_OCFS2_FS is not set # CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set CONFIG_FILE_LOCKING=y CONFIG_FSNOTIFY=y CONFIG_DNOTIFY=y @@ -886,8 +1007,11 @@ CONFIG_INOTIFY_USER=y # # DOS/FAT/NT Filesystems # +CONFIG_FAT_FS=y # CONFIG_MSDOS_FS is not set -# CONFIG_VFAT_FS is not set +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" # CONFIG_NTFS_FS is not set # -- cgit v1.2.3 From 9f591fd76afdc0e5192e9ed00a36f8efc0b4dfe6 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 11 Mar 2010 15:53:11 -0300 Subject: perf record: Don't try to find buildids in a zero sized file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixing this symptom: [acme@mica linux-2.6-tip]$ perf record -a -f Fatal: Permission error - are you root? Bus error [acme@mica linux-2.6-tip]$ I.e. if for some reason no data is collected, in this case a non root user trying to do systemwide profiling, no data will be collected, and then we end up trying to mmap a zero sized file and access the file header, b00m. Reported-by: Ingo Molnar Signed-off-by: Arnaldo Carvalho de Melo Cc: Frédéric Weisbecker Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Paul Mackerras Cc: LKML-Reference: <1268333592-30872-1-git-send-email-acme@infradead.org> Signed-off-by: Ingo Molnar --- tools/perf/builtin-record.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index b09d3b27ca14..3b8b6387c47c 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -395,6 +395,9 @@ static int process_buildids(void) { u64 size = lseek(output, 0, SEEK_CUR); + if (size == 0) + return 0; + session->fd = output; return __perf_session__process_events(session, post_processing_offset, size - post_processing_offset, -- cgit v1.2.3 From 9e542f37ce20428170010baa36a0ecbfcc0b29bb Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 11 Mar 2010 11:06:56 -0800 Subject: omap: Enable PM_RUNTIME in defconfigs to avoid USB compile errors While waiting for the related USB patch, fix compile by enabling it in the defconfigs. 
As discussed at: http://thread.gmane.org/gmane.linux.usb.general/27432/focus=4460 Otherwise we'll get errors like: drivers/usb/core/hcd.c:1892: error: 'pm_wq' undeclared (first use in this function) drivers/usb/core/hcd.c:1892: error: (Each undeclared identifier is reported only once drivers/usb/core/hcd.c:1892: error: for each function it appears in.) Signed-off-by: Tony Lindgren --- arch/arm/configs/cm_t35_defconfig | 2 +- arch/arm/configs/n770_defconfig | 1 + arch/arm/configs/omap3_beagle_defconfig | 1 + arch/arm/configs/omap3_defconfig | 2 +- arch/arm/configs/omap3_evm_defconfig | 1 + arch/arm/configs/omap3_touchbook_defconfig | 2 +- arch/arm/configs/omap_3430sdp_defconfig | 1 + arch/arm/configs/omap_3630sdp_defconfig | 2 +- arch/arm/configs/omap_h2_1610_defconfig | 1 + arch/arm/configs/omap_zoom2_defconfig | 1 + arch/arm/configs/omap_zoom3_defconfig | 2 +- arch/arm/configs/rx51_defconfig | 1 + 12 files changed, 12 insertions(+), 5 deletions(-) diff --git a/arch/arm/configs/cm_t35_defconfig b/arch/arm/configs/cm_t35_defconfig index 893cd267e075..032b49bad91f 100644 --- a/arch/arm/configs/cm_t35_defconfig +++ b/arch/arm/configs/cm_t35_defconfig @@ -358,7 +358,7 @@ CONFIG_PM_SLEEP=y CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y # CONFIG_APM_EMULATION is not set -# CONFIG_PM_RUNTIME is not set +CONFIG_PM_RUNTIME=y CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_NET=y diff --git a/arch/arm/configs/n770_defconfig b/arch/arm/configs/n770_defconfig index 75cae18fbcb6..de0c28aa43e7 100644 --- a/arch/arm/configs/n770_defconfig +++ b/arch/arm/configs/n770_defconfig @@ -308,6 +308,7 @@ CONFIG_PM_SLEEP=y CONFIG_SUSPEND_UP_POSSIBLE=y CONFIG_SUSPEND=y # CONFIG_APM_EMULATION is not set +CONFIG_PM_RUNTIME=y # # Networking diff --git a/arch/arm/configs/omap3_beagle_defconfig b/arch/arm/configs/omap3_beagle_defconfig index c7999f5b1c9a..5a9e95fa728b 100644 --- a/arch/arm/configs/omap3_beagle_defconfig +++ b/arch/arm/configs/omap3_beagle_defconfig @@ -324,6 +324,7 @@ CONFIG_PM_SLEEP=y CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y # CONFIG_APM_EMULATION is not set +CONFIG_PM_RUNTIME=y CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_NET=y diff --git a/arch/arm/configs/omap3_defconfig b/arch/arm/configs/omap3_defconfig index 714835e5ebec..d6ad92177324 100644 --- a/arch/arm/configs/omap3_defconfig +++ b/arch/arm/configs/omap3_defconfig @@ -450,7 +450,7 @@ CONFIG_SUSPEND=y # CONFIG_PM_TEST_SUSPEND is not set CONFIG_SUSPEND_FREEZER=y # CONFIG_APM_EMULATION is not set -# CONFIG_PM_RUNTIME is not set +CONFIG_PM_RUNTIME=y CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_NET=y diff --git a/arch/arm/configs/omap3_evm_defconfig b/arch/arm/configs/omap3_evm_defconfig index e2ad859fbec6..a6dd6d1af806 100644 --- a/arch/arm/configs/omap3_evm_defconfig +++ b/arch/arm/configs/omap3_evm_defconfig @@ -340,6 +340,7 @@ CONFIG_PM_SLEEP=y CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y # CONFIG_APM_EMULATION is not set +CONFIG_PM_RUNTIME=y CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_NET=y diff --git a/arch/arm/configs/omap3_touchbook_defconfig b/arch/arm/configs/omap3_touchbook_defconfig index 74fe6be9c5ec..968fbaa8f04d 100644 --- a/arch/arm/configs/omap3_touchbook_defconfig +++ b/arch/arm/configs/omap3_touchbook_defconfig @@ -368,7 +368,7 @@ CONFIG_SUSPEND=y # CONFIG_PM_TEST_SUSPEND is not set CONFIG_SUSPEND_FREEZER=y # CONFIG_APM_EMULATION is not set -# CONFIG_PM_RUNTIME is not set +CONFIG_PM_RUNTIME=y CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_NET=y diff --git a/arch/arm/configs/omap_3430sdp_defconfig b/arch/arm/configs/omap_3430sdp_defconfig index bb2917e5cb47..ddde429a7d9b 
100644 --- a/arch/arm/configs/omap_3430sdp_defconfig +++ b/arch/arm/configs/omap_3430sdp_defconfig @@ -363,6 +363,7 @@ CONFIG_PM_SLEEP=y CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y # CONFIG_APM_EMULATION is not set +CONFIG_PM_RUNTIME=y CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_NET=y diff --git a/arch/arm/configs/omap_3630sdp_defconfig b/arch/arm/configs/omap_3630sdp_defconfig index d25c3d4424ca..609f348b1055 100644 --- a/arch/arm/configs/omap_3630sdp_defconfig +++ b/arch/arm/configs/omap_3630sdp_defconfig @@ -361,7 +361,7 @@ CONFIG_SUSPEND=y # CONFIG_PM_TEST_SUSPEND is not set CONFIG_SUSPEND_FREEZER=y # CONFIG_APM_EMULATION is not set -# CONFIG_PM_RUNTIME is not set +CONFIG_PM_RUNTIME=y CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_NET=y diff --git a/arch/arm/configs/omap_h2_1610_defconfig b/arch/arm/configs/omap_h2_1610_defconfig index 523189586a4b..91ef2ed0f80a 100644 --- a/arch/arm/configs/omap_h2_1610_defconfig +++ b/arch/arm/configs/omap_h2_1610_defconfig @@ -331,6 +331,7 @@ CONFIG_PM_SLEEP=y CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y # CONFIG_APM_EMULATION is not set +CONFIG_PM_RUNTIME=y CONFIG_ARCH_SUSPEND_POSSIBLE=y # diff --git a/arch/arm/configs/omap_zoom2_defconfig b/arch/arm/configs/omap_zoom2_defconfig index a82e81332a03..f5c6e11cf189 100644 --- a/arch/arm/configs/omap_zoom2_defconfig +++ b/arch/arm/configs/omap_zoom2_defconfig @@ -343,6 +343,7 @@ CONFIG_SUSPEND=y # CONFIG_PM_TEST_SUSPEND is not set CONFIG_SUSPEND_FREEZER=y # CONFIG_APM_EMULATION is not set +CONFIG_PM_RUNTIME=y CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_NET=y diff --git a/arch/arm/configs/omap_zoom3_defconfig b/arch/arm/configs/omap_zoom3_defconfig index ff8ac3dcc31d..ea9a5012d332 100644 --- a/arch/arm/configs/omap_zoom3_defconfig +++ b/arch/arm/configs/omap_zoom3_defconfig @@ -361,7 +361,7 @@ CONFIG_SUSPEND=y # CONFIG_PM_TEST_SUSPEND is not set CONFIG_SUSPEND_FREEZER=y # CONFIG_APM_EMULATION is not set -# CONFIG_PM_RUNTIME is not set +CONFIG_PM_RUNTIME=y CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_NET=y diff --git a/arch/arm/configs/rx51_defconfig b/arch/arm/configs/rx51_defconfig index 193bd334fbbf..45135ffadc57 100644 --- a/arch/arm/configs/rx51_defconfig +++ b/arch/arm/configs/rx51_defconfig @@ -322,6 +322,7 @@ CONFIG_PM_SLEEP=y CONFIG_SUSPEND=y CONFIG_SUSPEND_FREEZER=y # CONFIG_APM_EMULATION is not set +CONFIG_PM_RUNTIME=y CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_NET=y -- cgit v1.2.3 From 21b90340207d324f92111e25ead1752533eeb9ca Mon Sep 17 00:00:00 2001 From: Thomas Weber Date: Thu, 25 Feb 2010 09:40:19 +0000 Subject: OMAP2: serial.c: Fix number of uarts in early_init The omap_serial_early_init prints the following errors: Could not get uart4_ick Could not get uart4_fck because all the uarts available in omap_uart[] will be initialized. Only omap4430 and omap3630 have 4 uarts at the moment. This patch reduces the number of uarts when cpu is not omap4430 or omap3630. 
Signed-off-by: Thomas Weber Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/serial.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c index b79bc8926cc9..da77930480e9 100644 --- a/arch/arm/mach-omap2/serial.c +++ b/arch/arm/mach-omap2/serial.c @@ -644,16 +644,21 @@ static void serial_out_override(struct uart_port *up, int offset, int value) } void __init omap_serial_early_init(void) { - int i; + int i, nr_ports; char name[16]; + if (!(cpu_is_omap3630() || cpu_is_omap4430())) + nr_ports = 3; + else + nr_ports = ARRAY_SIZE(omap_uart); + /* * Make sure the serial ports are muxed on at this point. * You have to mux them off in device drivers later on * if not needed. */ - for (i = 0; i < ARRAY_SIZE(omap_uart); i++) { + for (i = 0; i < nr_ports; i++) { struct omap_uart_state *uart = &omap_uart[i]; struct platform_device *pdev = &uart->pdev; struct device *dev = &pdev->dev; @@ -669,17 +674,17 @@ void __init omap_serial_early_init(void) continue; } - sprintf(name, "uart%d_ick", i+1); + sprintf(name, "uart%d_ick", i + 1); uart->ick = clk_get(NULL, name); if (IS_ERR(uart->ick)) { - printk(KERN_ERR "Could not get uart%d_ick\n", i+1); + printk(KERN_ERR "Could not get uart%d_ick\n", i + 1); uart->ick = NULL; } sprintf(name, "uart%d_fck", i+1); uart->fck = clk_get(NULL, name); if (IS_ERR(uart->fck)) { - printk(KERN_ERR "Could not get uart%d_fck\n", i+1); + printk(KERN_ERR "Could not get uart%d_fck\n", i + 1); uart->fck = NULL; } -- cgit v1.2.3 From 26e4248359c6bf2da4a07767f6e12f1df426ab0a Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Mon, 25 Jan 2010 18:27:21 -0600 Subject: omap2/3/4: mailbox: remove compiler warning Remove a compiler warning in device-specific mailbox module. Signed-off-by: Suman Anna Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/mailbox.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c index 52a981cb8fdd..318f3638653c 100644 --- a/arch/arm/mach-omap2/mailbox.c +++ b/arch/arm/mach-omap2/mailbox.c @@ -430,19 +430,19 @@ static int __devinit omap2_mbox_probe(struct platform_device *pdev) if (unlikely(!res)) { dev_err(&pdev->dev, "invalid irq resource\n"); ret = -ENODEV; - goto err_iva1; + omap_mbox_unregister(&mbox_dsp_info); + goto err_dsp; } mbox_iva_info.irq = res->start; ret = omap_mbox_register(&pdev->dev, &mbox_iva_info); - if (ret) - goto err_iva1; + if (ret) { + omap_mbox_unregister(&mbox_dsp_info); + goto err_dsp; + } } #endif return 0; -err_iva1: - omap_mbox_unregister(&mbox_dsp_info); - err_dsp: iounmap(mbox_base); return ret; -- cgit v1.2.3 From 29b9a218d0be8e207ddeacdc68eb1e4b7f54b85a Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Fri, 26 Feb 2010 10:25:28 +0000 Subject: omap: pass the reboot command to the boot loader This patch follows the commit be093beb608edf821b45fe00a8a080fb5c6ed4af by Russell King: OMAP wishes to pass state to the boot loader upon reboot in order to instruct it whether to wait for USB-based reflashing or not. There is already a facility to do this via the reboot() syscall, except we ignore the string passed to machine_restart(). The patch adds the missing parameter to omap1_arch_reset() and omap_prcm_arch_reset(), and modifies the latter to pass the reboot command parameter to the boot loader instead of reboot mode (which is for kernel internal use only and cannot be modified by the userspace). 
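For reference, the string originates from userspace via the four-argument
form of reboot(2). A hedged example (the command string itself is just a
placeholder) of handing a reboot command to the kernel, which passes it down
to arch_reset() and, with this patch, on to the boot loader:

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/reboot.h>

	int main(void)
	{
		sync();
		/* LINUX_REBOOT_CMD_RESTART2 carries an arbitrary string */
		return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1,
			       LINUX_REBOOT_MAGIC2,
			       LINUX_REBOOT_CMD_RESTART2, "bootmenu");
	}

Note that on OMAP3 only the first byte of the string reaches the scratchpad
word ('B', 'M', plus that single command character), so the boot loader sees
a one-character command code rather than the full string.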
Signed-off-by: Aaro Koskinen Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/prcm.c | 4 ++-- arch/arm/plat-omap/include/plat/prcm.h | 2 +- arch/arm/plat-omap/include/plat/system.h | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c index 81872aacb801..9537f6f2352d 100644 --- a/arch/arm/mach-omap2/prcm.c +++ b/arch/arm/mach-omap2/prcm.c @@ -133,7 +133,7 @@ u32 omap_prcm_get_reset_sources(void) EXPORT_SYMBOL(omap_prcm_get_reset_sources); /* Resets clock rates and reboots the system. Only called from system.h */ -void omap_prcm_arch_reset(char mode) +void omap_prcm_arch_reset(char mode, const char *cmd) { s16 prcm_offs = 0; @@ -145,7 +145,7 @@ void omap_prcm_arch_reset(char mode) u32 l; prcm_offs = OMAP3430_GR_MOD; - l = ('B' << 24) | ('M' << 16) | mode; + l = ('B' << 24) | ('M' << 16) | (cmd ? (u8)*cmd : 0); /* Reserve the first word in scratchpad for communicating * with the boot ROM. A pointer to a data structure * describing the boot process can be stored there, diff --git a/arch/arm/plat-omap/include/plat/prcm.h b/arch/arm/plat-omap/include/plat/prcm.h index d6a0e27d5a7f..9fbd91419cd1 100644 --- a/arch/arm/plat-omap/include/plat/prcm.h +++ b/arch/arm/plat-omap/include/plat/prcm.h @@ -24,7 +24,7 @@ #define __ASM_ARM_ARCH_OMAP_PRCM_H u32 omap_prcm_get_reset_sources(void); -void omap_prcm_arch_reset(char mode); +void omap_prcm_arch_reset(char mode, const char *cmd); int omap2_cm_wait_idlest(void __iomem *reg, u32 mask, u8 idlest, const char *name); diff --git a/arch/arm/plat-omap/include/plat/system.h b/arch/arm/plat-omap/include/plat/system.h index c58a4ef42a45..d0a119f735b4 100644 --- a/arch/arm/plat-omap/include/plat/system.h +++ b/arch/arm/plat-omap/include/plat/system.h @@ -22,7 +22,7 @@ static inline void arch_idle(void) cpu_do_idle(); } -static inline void omap1_arch_reset(char mode) +static inline void omap1_arch_reset(char mode, const char *cmd) { /* * Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 
28 @@ -43,9 +43,9 @@ static inline void omap1_arch_reset(char mode) static inline void arch_reset(char mode, const char *cmd) { if (!cpu_class_is_omap2()) - omap1_arch_reset(mode); + omap1_arch_reset(mode, cmd); else - omap_prcm_arch_reset(mode); + omap_prcm_arch_reset(mode, cmd); } #endif -- cgit v1.2.3 From 00df9384ccfa02c062449e381c051bcfe97cdf6f Mon Sep 17 00:00:00 2001 From: Andrea Gelmini Date: Sat, 27 Feb 2010 16:51:38 +0000 Subject: omap: Checkpatch cleanup for blizzard.h arch/arm/plat-omap/include/plat/blizzard.h:9: ERROR: spaces prohibited around that ':' (ctx:WxW) Signed-off-by: Andrea Gelmini Signed-off-by: Tony Lindgren --- arch/arm/plat-omap/include/plat/blizzard.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/plat-omap/include/plat/blizzard.h b/arch/arm/plat-omap/include/plat/blizzard.h index 8d160f171372..56e7f2e7d12f 100644 --- a/arch/arm/plat-omap/include/plat/blizzard.h +++ b/arch/arm/plat-omap/include/plat/blizzard.h @@ -6,7 +6,7 @@ struct blizzard_platform_data { void (*power_down)(struct device *dev); unsigned long (*get_clock_rate)(struct device *dev); - unsigned te_connected : 1; + unsigned te_connected:1; }; #endif -- cgit v1.2.3 From 228893f9d2b70a7416267050ce03bfd9eb624713 Mon Sep 17 00:00:00 2001 From: Enric Balletbo i Serra Date: Mon, 1 Mar 2010 11:18:08 +0000 Subject: omap3: Fix support for the LEDs connected to GPIO outputs on IGEP v2board Select CONFIG_LEDS_GPIO to enable IGEP v2 LED support and control of supported LEDs from userspace. Otherwise GPIO LEDs are exported as GPIO 26, 27 and 28 using the gpiolib framework. Signed-off-by: Enric Balletbo i Serra Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/board-igep0020.c | 54 ++++++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 18 deletions(-) diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c index 4f1accf7f7f3..3c7789d45051 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include @@ -39,8 +38,8 @@ #define IGEP2_SMSC911X_CS 5 #define IGEP2_SMSC911X_GPIO 176 #define IGEP2_GPIO_USBH_NRESET 24 -#define IGEP2_GPIO_LED0_RED 26 -#define IGEP2_GPIO_LED0_GREEN 27 +#define IGEP2_GPIO_LED0_GREEN 26 +#define IGEP2_GPIO_LED0_RED 27 #define IGEP2_GPIO_LED1_RED 28 #define IGEP2_GPIO_DVI_PUP 170 #define IGEP2_GPIO_WIFI_NPD 94 @@ -355,34 +354,50 @@ static void __init igep2_display_init(void) gpio_direction_output(IGEP2_GPIO_DVI_PUP, 1)) pr_err("IGEP v2: Could not obtain gpio GPIO_DVI_PUP\n"); } -#ifdef CONFIG_LEDS_TRIGGERS -static struct gpio_led gpio_leds[] = { + +#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) +#include + +static struct gpio_led igep2_gpio_leds[] = { { - .name = "GPIO_LED1_RED", + .name = "led0:red", + .gpio = IGEP2_GPIO_LED0_RED, + }, + { + .name = "led0:green", .default_trigger = "heartbeat", + .gpio = IGEP2_GPIO_LED0_GREEN, + }, + { + .name = "led1:red", .gpio = IGEP2_GPIO_LED1_RED, }, }; -static struct gpio_led_platform_data gpio_leds_info = { - .leds = gpio_leds, - .num_leds = ARRAY_SIZE(gpio_leds), +static struct gpio_led_platform_data igep2_led_pdata = { + .leds = igep2_gpio_leds, + .num_leds = ARRAY_SIZE(igep2_gpio_leds), }; -static struct platform_device leds_gpio = { +static struct platform_device igep2_led_device = { .name = "leds-gpio", .id = -1, .dev = { - .platform_data = &gpio_leds_info, + .platform_data = &igep2_led_pdata, }, }; + +static void __init igep2_init_led(void) +{ + 
platform_device_register(&igep2_led_device); +} + +#else +static inline void igep2_init_led(void) {} #endif static struct platform_device *igep2_devices[] __initdata = { &igep2_dss_device, -#ifdef CONFIG_LEDS_TRIGGERS - &leds_gpio, -#endif }; static void __init igep2_init_irq(void) @@ -471,31 +486,34 @@ static void __init igep2_init(void) usb_ehci_init(&ehci_pdata); igep2_flash_init(); + igep2_init_led(); igep2_display_init(); igep2_init_smsc911x(); /* GPIO userspace leds */ - if ((gpio_request(IGEP2_GPIO_LED0_RED, "GPIO_LED0_RED") == 0) && +#if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE) + if ((gpio_request(IGEP2_GPIO_LED0_RED, "led0:red") == 0) && (gpio_direction_output(IGEP2_GPIO_LED0_RED, 1) == 0)) { gpio_export(IGEP2_GPIO_LED0_RED, 0); gpio_set_value(IGEP2_GPIO_LED0_RED, 0); } else pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_RED\n"); - if ((gpio_request(IGEP2_GPIO_LED0_GREEN, "GPIO_LED0_GREEN") == 0) && + if ((gpio_request(IGEP2_GPIO_LED0_GREEN, "led0:green") == 0) && (gpio_direction_output(IGEP2_GPIO_LED0_GREEN, 1) == 0)) { gpio_export(IGEP2_GPIO_LED0_GREEN, 0); gpio_set_value(IGEP2_GPIO_LED0_GREEN, 0); } else pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_GREEN\n"); -#ifndef CONFIG_LEDS_TRIGGERS - if ((gpio_request(IGEP2_GPIO_LED1_RED, "GPIO_LED1_RED") == 0) && + + if ((gpio_request(IGEP2_GPIO_LED1_RED, "led1:red") == 0) && (gpio_direction_output(IGEP2_GPIO_LED1_RED, 1) == 0)) { gpio_export(IGEP2_GPIO_LED1_RED, 0); gpio_set_value(IGEP2_GPIO_LED1_RED, 0); } else pr_warning("IGEP v2: Could not obtain gpio GPIO_LED1_RED\n"); #endif + /* GPIO W-LAN + Bluetooth combo module */ if ((gpio_request(IGEP2_GPIO_WIFI_NPD, "GPIO_WIFI_NPD") == 0) && (gpio_direction_output(IGEP2_GPIO_WIFI_NPD, 1) == 0)) { -- cgit v1.2.3 From f00d649703fbedbdeb33d63c4c4731656e11297c Mon Sep 17 00:00:00 2001 From: Sergio Aguirre Date: Wed, 3 Mar 2010 16:21:08 +0000 Subject: omap: Fix gpio_resume_after_retention For omap4 case, this was wrongly writing GPIO_LEVELDETECTx registers with OMAP24XX_ offset and OMAP4_ offset. 
Bug introduced in commit: commit 3f1686a9bfe74979c6ad538c78039730f665f77e Author: Tony Lindgren Date: Mon Feb 15 09:27:25 2010 -0800 omap: Fix gpio.c for multi-omap for omap4 Signed-off-by: Sergio Aguirre Signed-off-by: Tony Lindgren --- arch/arm/plat-omap/gpio.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c index 337199ed3479..76a347b3ce07 100644 --- a/arch/arm/plat-omap/gpio.c +++ b/arch/arm/plat-omap/gpio.c @@ -2140,18 +2140,18 @@ void omap2_gpio_resume_after_retention(void) if (gen) { u32 old0, old1; - if (cpu_is_omap24xx() || cpu_is_omap44xx()) { + if (cpu_is_omap24xx() || cpu_is_omap34xx()) { old0 = __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT0); old1 = __raw_readl(bank->base + OMAP24XX_GPIO_LEVELDETECT1); - __raw_writel(old0 | gen, bank->base + + __raw_writel(old0 | gen, bank->base + OMAP24XX_GPIO_LEVELDETECT0); - __raw_writel(old1 | gen, bank->base + + __raw_writel(old1 | gen, bank->base + OMAP24XX_GPIO_LEVELDETECT1); - __raw_writel(old0, bank->base + + __raw_writel(old0, bank->base + OMAP24XX_GPIO_LEVELDETECT0); - __raw_writel(old1, bank->base + + __raw_writel(old1, bank->base + OMAP24XX_GPIO_LEVELDETECT1); } -- cgit v1.2.3 From 8185e468446e1e2b383bd61210702fffaed3ddc0 Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Wed, 3 Mar 2010 16:24:53 +0000 Subject: omap: mach-omap2/io.c: fix function declarations Get rid of the following warnings: warning: non-ANSI function declaration of function [...] Signed-off-by: Aaro Koskinen Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/io.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 402e8f0d0f21..ae89d55132d1 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -237,7 +237,7 @@ static void __init _omap2_map_common_io(void) } #ifdef CONFIG_ARCH_OMAP2420 -void __init omap242x_map_common_io() +void __init omap242x_map_common_io(void) { iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc)); iotable_init(omap242x_io_desc, ARRAY_SIZE(omap242x_io_desc)); @@ -246,7 +246,7 @@ void __init omap242x_map_common_io() #endif #ifdef CONFIG_ARCH_OMAP2430 -void __init omap243x_map_common_io() +void __init omap243x_map_common_io(void) { iotable_init(omap24xx_io_desc, ARRAY_SIZE(omap24xx_io_desc)); iotable_init(omap243x_io_desc, ARRAY_SIZE(omap243x_io_desc)); @@ -255,7 +255,7 @@ void __init omap243x_map_common_io() #endif #ifdef CONFIG_ARCH_OMAP3 -void __init omap34xx_map_common_io() +void __init omap34xx_map_common_io(void) { iotable_init(omap34xx_io_desc, ARRAY_SIZE(omap34xx_io_desc)); _omap2_map_common_io(); @@ -263,7 +263,7 @@ void __init omap34xx_map_common_io() #endif #ifdef CONFIG_ARCH_OMAP4 -void __init omap44xx_map_common_io() +void __init omap44xx_map_common_io(void) { iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc)); _omap2_map_common_io(); -- cgit v1.2.3 From 03d5671dde04a186471969d73cfc89ee02664c2c Mon Sep 17 00:00:00 2001 From: Grazvydas Ignotas Date: Wed, 10 Mar 2010 11:13:53 +0000 Subject: omap3: pandora: add missing i2c3 board_info This will allow BQ27500 fuel gauge to function. 
Signed-off-by: Grazvydas Ignotas Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/board-omap3pandora.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c index 51a5315519c4..395d049bf010 100644 --- a/arch/arm/mach-omap2/board-omap3pandora.c +++ b/arch/arm/mach-omap2/board-omap3pandora.c @@ -459,12 +459,20 @@ static struct i2c_board_info __initdata omap3pandora_i2c_boardinfo[] = { }, }; +static struct i2c_board_info __initdata omap3pandora_i2c3_boardinfo[] = { + { + I2C_BOARD_INFO("bq27500", 0x55), + .flags = I2C_CLIENT_WAKE, + }, +}; + static int __init omap3pandora_i2c_init(void) { omap_register_i2c_bus(1, 2600, omap3pandora_i2c_boardinfo, ARRAY_SIZE(omap3pandora_i2c_boardinfo)); /* i2c2 pins are not connected */ - omap_register_i2c_bus(3, 100, NULL, 0); + omap_register_i2c_bus(3, 100, omap3pandora_i2c3_boardinfo, + ARRAY_SIZE(omap3pandora_i2c3_boardinfo)); return 0; } -- cgit v1.2.3 From 54c44fb7df4774280a2deb4ba191cbab84602413 Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Wed, 10 Mar 2010 17:16:30 +0000 Subject: OMAP2: cpu_is_omap2*: fix compile-time removal of unused code Currently if omap2420 is defined but not omap2430, cpu_is_omap2430() is still defined as a macro, instead of #define'd to zero. This results in conditional cpu_is_omap2430() code still being compiled, and leads to possible compile/link errors. In particular for hwmod init. To fix, add extra #ifdefs to CPU check macros to ensure that the is_omap* macros are zero for each OMAP2 if they are not configured into the kernel. Tested-by: Santosh Shilimkar Signed-off-by: Kevin Hilman Signed-off-by: Tony Lindgren --- arch/arm/plat-omap/include/plat/cpu.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h index ed8786c41df2..75141742300c 100644 --- a/arch/arm/plat-omap/include/plat/cpu.h +++ b/arch/arm/plat-omap/include/plat/cpu.h @@ -167,10 +167,14 @@ IS_OMAP_SUBCLASS(443x, 0x443) #if defined(MULTI_OMAP2) # if defined(CONFIG_ARCH_OMAP2) # undef cpu_is_omap24xx -# undef cpu_is_omap242x -# undef cpu_is_omap243x # define cpu_is_omap24xx() is_omap24xx() +# endif +# if defined (CONFIG_ARCH_OMAP2420) +# undef cpu_is_omap242x # define cpu_is_omap242x() is_omap242x() +# endif +# if defined (CONFIG_ARCH_OMAP2430) +# undef cpu_is_omap243x # define cpu_is_omap243x() is_omap243x() # endif # if defined(CONFIG_ARCH_OMAP3) -- cgit v1.2.3 From aa4b1f6e83aaf20997edc4c10e03baf834343e5a Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Wed, 10 Mar 2010 17:16:31 +0000 Subject: OMAP4: fix temporary hacks that break multi-omap PM When building for multi-omap, and OMAP4 is enabled, CONFIG_ARCH_OMAP4 will be true and prevent included code from building/running for OMAP2/3 as well. This problem exists in io.c where some hwmod/PM/SDRC init code is prevented from running even on OMAP2/3 when OMAP4 is included in a multi-OMAP build. A quick glance suggests that this #ifndef is no longer needed in most of the cases. In the remaining cases, the function is wrapped with "if (cpu_is_omap24xx() || cpu_is_omap34xx())" which will be optimized out for OMAP4-only builds. Note that this is only a short-term fix. Longer-term, OMAP4 needs to create init functions for SDRC and hwmod late-init. 
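The compile-time constant folding that both this patch and the preceding cpu_is_omap2* patch rely on can be shown with a small self-contained sketch. The stand-in functions here are hypothetical and only exist so the example compiles; the real definitions live in plat/cpu.h and the hwmod code:

    /* Stand-ins so the sketch compiles on its own. */
    static inline int is_omap242x(void)     { return 1; }
    static void omap2420_hwmod_init(void)   { }

    #ifdef CONFIG_ARCH_OMAP2420
    # define cpu_is_omap242x()      is_omap242x()   /* runtime check */
    #else
    # define cpu_is_omap242x()      0               /* compile-time constant */
    #endif

    void example_init(void)
    {
            /* When the macro is the constant 0, the compiler drops this
             * branch and the reference to omap2420_hwmod_init() with it,
             * so code that was never built for this kernel is neither
             * compiled in nor needed at link time. */
            if (cpu_is_omap242x())
                    omap2420_hwmod_init();
    }

The same reasoning applies to the cpu_is_omap24xx() || cpu_is_omap34xx() guards used below: on an OMAP4-only build both collapse to 0, so the SDRC and hwmod late-init calls disappear instead of breaking the build.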
Tested-by: Santosh Shilimkar Signed-off-by: Kevin Hilman Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/io.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index ae89d55132d1..87f676acf61d 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -309,7 +309,6 @@ void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0, { pwrdm_init(powerdomains_omap); clkdm_init(clockdomains_omap, clkdm_autodeps); -#ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once the clkdev is ready */ if (cpu_is_omap242x()) omap2420_hwmod_init(); else if (cpu_is_omap243x()) @@ -319,7 +318,6 @@ void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0, omap2_mux_init(); /* The OPP tables have to be registered before a clk init */ omap_pm_if_early_init(mpu_opps, dsp_opps, l3_opps); -#endif if (cpu_is_omap2420()) omap2420_clk_init(); @@ -333,11 +331,12 @@ void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0, pr_err("Could not init clock framework - unknown CPU\n"); omap_serial_early_init(); -#ifndef CONFIG_ARCH_OMAP4 - omap_hwmod_late_init(); + if (cpu_is_omap24xx() || cpu_is_omap34xx()) /* FIXME: OMAP4 */ + omap_hwmod_late_init(); omap_pm_if_init(); - omap2_sdrc_init(sdrc_cs0, sdrc_cs1); - _omap2_init_reprogram_sdrc(); -#endif + if (cpu_is_omap24xx() || cpu_is_omap34xx()) { + omap2_sdrc_init(sdrc_cs0, sdrc_cs1); + _omap2_init_reprogram_sdrc(); + } gpmc_init(); } -- cgit v1.2.3 From 0841cb826859a4f14c472cc75a782811310f010e Mon Sep 17 00:00:00 2001 From: Janusz Krzysztofik Date: Tue, 23 Feb 2010 15:50:38 +0000 Subject: omap: McBSP: Drop unnecessary status/error bit clearing on reg_cacheretrieved register values The MsBSP register cache will never have any error/status flags set, since these flags are never written to the reg_cache. So it is kind of not necessary to clear these flags, which are actually always 0. In other words, clearing the status/error flags are not necessary, since the reg_cache will never got these bits set. We can just write back the register content from the cache as it is when clearing an error condition. Tested on Amstrad Delta. Reported-by: Peter Ujfalusi Signed-off-by: Janusz Krzysztofik Acked-by: Jarkko Nikula Signed-off-by: Tony Lindgren --- arch/arm/plat-omap/mcbsp.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c index e47686e0a633..52dfcc81511e 100644 --- a/arch/arm/plat-omap/mcbsp.c +++ b/arch/arm/plat-omap/mcbsp.c @@ -133,8 +133,7 @@ static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *dev_id) dev_err(mcbsp_tx->dev, "TX Frame Sync Error! : 0x%x\n", irqst_spcr2); /* Writing zero to XSYNC_ERR clears the IRQ */ - MCBSP_WRITE(mcbsp_tx, SPCR2, - MCBSP_READ_CACHE(mcbsp_tx, SPCR2) & ~(XSYNC_ERR)); + MCBSP_WRITE(mcbsp_tx, SPCR2, MCBSP_READ_CACHE(mcbsp_tx, SPCR2)); } else { complete(&mcbsp_tx->tx_irq_completion); } @@ -154,8 +153,7 @@ static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id) dev_err(mcbsp_rx->dev, "RX Frame Sync Error! 
: 0x%x\n", irqst_spcr1); /* Writing zero to RSYNC_ERR clears the IRQ */ - MCBSP_WRITE(mcbsp_rx, SPCR1, - MCBSP_READ_CACHE(mcbsp_rx, SPCR1) & ~(RSYNC_ERR)); + MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1)); } else { complete(&mcbsp_rx->tx_irq_completion); } @@ -934,8 +932,7 @@ int omap_mcbsp_pollwrite(unsigned int id, u16 buf) /* if frame sync error - clear the error */ if (MCBSP_READ(mcbsp, SPCR2) & XSYNC_ERR) { /* clear error */ - MCBSP_WRITE(mcbsp, SPCR2, - MCBSP_READ_CACHE(mcbsp, SPCR2) & (~XSYNC_ERR)); + MCBSP_WRITE(mcbsp, SPCR2, MCBSP_READ_CACHE(mcbsp, SPCR2)); /* resend */ return -1; } else { @@ -975,8 +972,7 @@ int omap_mcbsp_pollread(unsigned int id, u16 *buf) /* if frame sync error - clear the error */ if (MCBSP_READ(mcbsp, SPCR1) & RSYNC_ERR) { /* clear error */ - MCBSP_WRITE(mcbsp, SPCR1, - MCBSP_READ_CACHE(mcbsp, SPCR1) & (~RSYNC_ERR)); + MCBSP_WRITE(mcbsp, SPCR1, MCBSP_READ_CACHE(mcbsp, SPCR1)); /* resend */ return -1; } else { -- cgit v1.2.3 From 29b2ee5af5f3a02846bd38a1e2121d62ee5f6aca Mon Sep 17 00:00:00 2001 From: Andrew Clayton Date: Sat, 6 Mar 2010 00:14:46 +0000 Subject: ARM/OMAP: Remove the +x bit from a couple of source files Remove the +x bit from a couple of source files Signed-off-by: Andrew Clayton Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/board-3630sdp.c | 0 arch/arm/mach-omap2/board-zoom-peripherals.c | 0 2 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 arch/arm/mach-omap2/board-3630sdp.c mode change 100755 => 100644 arch/arm/mach-omap2/board-zoom-peripherals.c diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c old mode 100755 new mode 100644 diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c old mode 100755 new mode 100644 -- cgit v1.2.3 From a2302b45d8ab41a55e84c39a6c6f813586ad8493 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 10 Mar 2010 19:10:28 +0100 Subject: ARM: 5985/2: ARM: Fix Samsung build after "ARM: Eliminate decompressor -Dstatic= PIC hack" Commit 5de813b6 (ARM: Eliminate decompressor -Dstatic= PIC hack) among other things changed the declared type of the error() function to an extern, conflicting with the forward declartion in the Samsung plat/uncompress.h which appears to have been relying on the static being defined away, causing build failures since error() ends up with a GOT relocation but the linker script discards all GOT relocated data and functions: arch/arm/boot/compressed/decompress.o: In function `gunzip': /home/broonie/git/linux-2.6/arch/arm/boot/compressed/../../../../lib/decompress_ +inflate.c:68: undefined reference to `error' and so on. Fix this by moving the declaration into uncompress/misc.c where it is shared with the rest of the code, correcting the definition as we go. 
Signed-off-by: Mark Brown Signed-off-by: Russell King --- arch/arm/boot/compressed/misc.c | 1 + arch/arm/plat-samsung/include/plat/uncompress.h | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c index d32bc71c1f78..d2b2ef41cd4f 100644 --- a/arch/arm/boot/compressed/misc.c +++ b/arch/arm/boot/compressed/misc.c @@ -33,6 +33,7 @@ unsigned int __machine_arch_type; #else static void putstr(const char *ptr); +extern void error(char *x); #include diff --git a/arch/arm/plat-samsung/include/plat/uncompress.h b/arch/arm/plat-samsung/include/plat/uncompress.h index e87ce8ffbbcd..7d6ed7263d57 100644 --- a/arch/arm/plat-samsung/include/plat/uncompress.h +++ b/arch/arm/plat-samsung/include/plat/uncompress.h @@ -140,8 +140,6 @@ static void arch_decomp_error(const char *x) #define arch_error arch_decomp_error #endif -static void error(char *err); - #ifdef CONFIG_S3C_BOOT_UART_FORCE_FIFO static inline void arch_enable_uart_fifo(void) { -- cgit v1.2.3 From 438ff39d7813515d3aac20f53b5b95aaa58b38f9 Mon Sep 17 00:00:00 2001 From: Rob Alley Date: Thu, 11 Mar 2010 02:15:04 +0100 Subject: ARM: 5986/1: at91sam9g20-ek: Correct braces in I2C registration code The change introduced in patch 5596/1 used incorrect bracing which resulted in the AT24 EEPROM no longer being registered. This patch corrects the bracing and allows both the WM8731 audio device and AT24 EEPROM device to be registered. Signed-off-by: Rob Alley Signed-off-by: Russell King --- arch/arm/mach-at91/board-sam9g20ek.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c index 29cf83177484..c11fd47aec5d 100644 --- a/arch/arm/mach-at91/board-sam9g20ek.c +++ b/arch/arm/mach-at91/board-sam9g20ek.c @@ -271,10 +271,12 @@ static void __init ek_add_device_buttons(void) {} static struct i2c_board_info __initdata ek_i2c_devices[] = { - { - I2C_BOARD_INFO("24c512", 0x50), - I2C_BOARD_INFO("wm8731", 0x1b), - }, + { + I2C_BOARD_INFO("24c512", 0x50) + }, + { + I2C_BOARD_INFO("wm8731", 0x1b) + }, }; -- cgit v1.2.3 From bb35579b45bcb0b74167a2165d3ea973f640bf00 Mon Sep 17 00:00:00 2001 From: Mikael Pettersson Date: Thu, 11 Mar 2010 12:28:31 +0100 Subject: ARM: 5987/1: fix warning in kernel/elfcore.c from ARM's elf.h 2.6.34-rc1 added kernel/elfcore.c which includes . On ARM, this results in: In file included from include/linux/elf.h:7, from kernel/elfcore.c:1: /tmp/linux-2.6.34-rc1/arch/arm/include/asm/elf.h:101: warning: 'struct task_struct' declared inside parameter list /tmp/linux-2.6.34-rc1/arch/arm/include/asm/elf.h:101: warning: its scope is only this definition or declaration, which is probably not what you want Including seems a bit heavyweight, so this patch just adds a tentative declaration of struct task_struct in . 
Signed-off-by: Mikael Pettersson Signed-off-by: Russell King --- arch/arm/include/asm/elf.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index a399bb5730f1..bff056489cc1 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -98,6 +98,7 @@ extern int elf_check_arch(const struct elf32_hdr *); extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int); #define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk) +struct task_struct; int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); #define ELF_CORE_COPY_TASK_REGS dump_task_regs -- cgit v1.2.3 From a91ed072d6135926c296a42b0b46de35e6347845 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Fri, 12 Mar 2010 06:19:25 +0100 Subject: ARM: 5988/1: pgprot_dmacoherent() for non-mmu builds Commit 26a26d329688ab018e068b412b03d43d7c299f0a ("dma-mapping: switch ARMv7 DMA mappings to retain 'memory' attribute") added a new macro, pgprot_dmacoherent(), to correctly map DMA memory. The non-mmu pgtable support code also needs to implement this macro, otherwise when compiling you get: CC arch/arm/mm/dma-mapping.o arch/arm/mm/dma-mapping.c: In function 'dma_alloc_coherent': arch/arm/mm/dma-mapping.c:320: error: implicit declaration of function 'pgprot_dmacoherent' arch/arm/mm/dma-mapping.c:320: error: 'pgprot_kernel' undeclared (first use in this function) arch/arm/mm/dma-mapping.c:320: error: (Each undeclared identifier is reported only once arch/arm/mm/dma-mapping.c:320: error: for each function it appears in.) Signed-off-by: Greg Ungerer Signed-off-by: Russell King --- arch/arm/include/asm/pgtable-nommu.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h index 013cfcdc4839..ffc0e85775b4 100644 --- a/arch/arm/include/asm/pgtable-nommu.h +++ b/arch/arm/include/asm/pgtable-nommu.h @@ -67,6 +67,7 @@ static inline int pte_file(pte_t pte) { return 0; } */ #define pgprot_noncached(prot) __pgprot(0) #define pgprot_writecombine(prot) __pgprot(0) +#define pgprot_dmacoherent(prot) __pgprot(0) /* -- cgit v1.2.3 From ddee87f208b6229d2910dd5930c87089dc56c87e Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 25 Feb 2010 15:04:14 +0100 Subject: ARM: 5959/1: ARM: perf-events: request PMU interrupts with IRQF_NOBALANCING If IRQ balancing is used on a multicore ARM system, PMU interrupt lines may be relocated onto CPUs other than the one causing the counter overflow. This can result in misattribution of events to the wrong core and, in the case that the CPU handling the interrupt has not experience counter overflow, the interrupt can be disabled because the handler returns IRQ_NONE. This patch adds the IRQF_NOBALANCING flag to the request_irq call in perf_events.c. 
Acked-by: Jamie Iles Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/perf_event.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index c54ceb3d1f97..b44d15948b56 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -332,7 +332,8 @@ armpmu_reserve_hardware(void) for (i = 0; i < pmu_irqs->num_irqs; ++i) { err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq, - IRQF_DISABLED, "armpmu", NULL); + IRQF_DISABLED | IRQF_NOBALANCING, + "armpmu", NULL); if (err) { pr_warning("unable to request IRQ%d for ARM " "perf counters\n", pmu_irqs->irqs[i]); -- cgit v1.2.3 From d10fca9f39238b07cc670b441d2b423de30359d2 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 26 Feb 2010 10:46:15 +0100 Subject: ARM: 5960/1: ARM: perf-events: fix v7 event selection mask The event selection mask for ARMv7 cores [ARMV7_EVTSEL_MASK] is incorrectly set to 0x7f. This means that the top bit of an event ID is ignored, so counting branch misses (id=0x10) and ISBs (id=0x90) give the same results. This patch sets the event selection mask to the correct value of 0xff. Signed-off-by: Jean Pihet Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/perf_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index b44d15948b56..c45a155a73dc 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -1625,7 +1625,7 @@ enum armv7_counters { /* * EVTSEL: Event selection reg */ -#define ARMV7_EVTSEL_MASK 0x7f /* Mask for writable bits */ +#define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */ /* * SELECT: Counter selection reg -- cgit v1.2.3 From 3ee8f0a2b1c81f0472b25d40aa5c1c7c6a0edc2a Mon Sep 17 00:00:00 2001 From: Markus Rathgeb Date: Sat, 13 Mar 2010 17:29:43 +0100 Subject: HID: Add RGT Clutch Wheel clutch device id This patch enables force feedback for the "RGT Force Feedback CLUTCH Racing Wheel". It only modifies hid-core.c (hid_blacklist) and hid-tmff.c to add the new USB IDs. 
Signed-off-by: Markus Rathgeb Signed-off-by: Jiri Kosina --- drivers/hid/hid-core.c | 1 + drivers/hid/hid-tmff.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 368fbb0c4ca6..2e2aa759d230 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1357,6 +1357,7 @@ static const struct hid_device_id hid_blacklist[] = { { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) }, + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c index 167ea746fb9c..c32f32c84ac8 100644 --- a/drivers/hid/hid-tmff.c +++ b/drivers/hid/hid-tmff.c @@ -251,6 +251,8 @@ static const struct hid_device_id tm_devices[] = { .driver_data = (unsigned long)ff_rumble }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651), /* FGT Rumble Force Wheel */ .driver_data = (unsigned long)ff_rumble }, + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653), /* RGT Force Feedback CLUTCH Raging Wheel */ + .driver_data = (unsigned long)ff_joystick }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654), /* FGT Force Feedback Wheel */ .driver_data = (unsigned long)ff_joystick }, { } -- cgit v1.2.3 From 1027247f6eb727db6c600b9eb02796f0766ae704 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 12 Feb 2010 14:36:24 +0000 Subject: ARM: Add L2 cache handling to smp boot support The page table and secondary data which we're asking the secondary CPU to make use of has to hit RAM to ensure that the secondary CPU can see it since it may not be taking part in coherency or cache searches at this point. Acked-by: Santosh Shilimkar Signed-off-by: Russell King --- arch/arm/kernel/smp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 57162af53dc9..577543f3857f 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -99,6 +99,7 @@ int __cpuinit __cpu_up(unsigned int cpu) *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) | PMD_TYPE_SECT | PMD_SECT_AP_WRITE); flush_pmd_entry(pmd); + outer_clean_range(__pa(pmd), __pa(pmd + 1)); /* * We need to tell the secondary core where to find @@ -106,7 +107,8 @@ int __cpuinit __cpu_up(unsigned int cpu) */ secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; secondary_data.pgdir = virt_to_phys(pgd); - wmb(); + __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); + outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); /* * Now bring the CPU into our world. -- cgit v1.2.3 From 3f17522ce461a31e7ced6311b28fcf5b8a763316 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 12 Feb 2010 14:32:01 +0000 Subject: Video: ARM CLCD: Better fix for swapped IENB and CNTL registers On PL111, as found on Realview and other platforms, these registers are always arranged as CNTL then IENB. On PL110, these registers are IENB then CNTL, except on Versatile platforms. Re-arrange the handling of these register swaps so that PL111 always gets it right without resorting to ifdefs, leaving the only case needing special handling being PL110 on Versatile. 
Fill out amba/clcd.h with the PL110/PL111 register definition differences in case someone tries to use the PL110 specific definitions on PL111. Signed-off-by: Russell King --- drivers/video/amba-clcd.c | 31 ++++++++++++++++++++++++------- include/linux/amba/clcd.h | 33 +++++++++++++++++---------------- 2 files changed, 41 insertions(+), 23 deletions(-) diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c index a21efcd10b78..afe21e6eb544 100644 --- a/drivers/video/amba-clcd.c +++ b/drivers/video/amba-clcd.c @@ -65,16 +65,16 @@ static void clcdfb_disable(struct clcd_fb *fb) if (fb->board->disable) fb->board->disable(fb); - val = readl(fb->regs + CLCD_CNTL); + val = readl(fb->regs + fb->off_cntl); if (val & CNTL_LCDPWR) { val &= ~CNTL_LCDPWR; - writel(val, fb->regs + CLCD_CNTL); + writel(val, fb->regs + fb->off_cntl); clcdfb_sleep(20); } if (val & CNTL_LCDEN) { val &= ~CNTL_LCDEN; - writel(val, fb->regs + CLCD_CNTL); + writel(val, fb->regs + fb->off_cntl); } /* @@ -94,7 +94,7 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl) * Bring up by first enabling.. */ cntl |= CNTL_LCDEN; - writel(cntl, fb->regs + CLCD_CNTL); + writel(cntl, fb->regs + fb->off_cntl); clcdfb_sleep(20); @@ -102,7 +102,7 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl) * and now apply power. */ cntl |= CNTL_LCDPWR; - writel(cntl, fb->regs + CLCD_CNTL); + writel(cntl, fb->regs + fb->off_cntl); /* * finally, enable the interface. @@ -233,7 +233,7 @@ static int clcdfb_set_par(struct fb_info *info) readl(fb->regs + CLCD_TIM0), readl(fb->regs + CLCD_TIM1), readl(fb->regs + CLCD_TIM2), readl(fb->regs + CLCD_TIM3), readl(fb->regs + CLCD_UBAS), readl(fb->regs + CLCD_LBAS), - readl(fb->regs + CLCD_IENB), readl(fb->regs + CLCD_CNTL)); + readl(fb->regs + fb->off_ienb), readl(fb->regs + fb->off_cntl)); #endif return 0; @@ -345,6 +345,23 @@ static int clcdfb_register(struct clcd_fb *fb) { int ret; + /* + * ARM PL111 always has IENB at 0x1c; it's only PL110 + * which is reversed on some platforms. + */ + if (amba_manf(fb->dev) == 0x41 && amba_part(fb->dev) == 0x111) { + fb->off_ienb = CLCD_PL111_IENB; + fb->off_cntl = CLCD_PL111_CNTL; + } else { +#ifdef CONFIG_ARCH_VERSATILE + fb->off_ienb = CLCD_PL111_IENB; + fb->off_cntl = CLCD_PL111_CNTL; +#else + fb->off_ienb = CLCD_PL110_IENB; + fb->off_cntl = CLCD_PL110_CNTL; +#endif + } + fb->clk = clk_get(&fb->dev->dev, NULL); if (IS_ERR(fb->clk)) { ret = PTR_ERR(fb->clk); @@ -416,7 +433,7 @@ static int clcdfb_register(struct clcd_fb *fb) /* * Ensure interrupts are disabled. */ - writel(0, fb->regs + CLCD_IENB); + writel(0, fb->regs + fb->off_ienb); fb_set_var(&fb->fb, &fb->fb.var); diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h index 29c0448265cf..ca16c3801a1e 100644 --- a/include/linux/amba/clcd.h +++ b/include/linux/amba/clcd.h @@ -21,22 +21,21 @@ #define CLCD_UBAS 0x00000010 #define CLCD_LBAS 0x00000014 -#if !defined(CONFIG_ARCH_VERSATILE) && !defined(CONFIG_ARCH_REALVIEW) -#define CLCD_IENB 0x00000018 -#define CLCD_CNTL 0x0000001c -#else -/* - * Someone rearranged these two registers on the Versatile - * platform... 
- */ -#define CLCD_IENB 0x0000001c -#define CLCD_CNTL 0x00000018 -#endif - -#define CLCD_STAT 0x00000020 -#define CLCD_INTR 0x00000024 -#define CLCD_UCUR 0x00000028 -#define CLCD_LCUR 0x0000002C +#define CLCD_PL110_IENB 0x00000018 +#define CLCD_PL110_CNTL 0x0000001c +#define CLCD_PL110_STAT 0x00000020 +#define CLCD_PL110_INTR 0x00000024 +#define CLCD_PL110_UCUR 0x00000028 +#define CLCD_PL110_LCUR 0x0000002C + +#define CLCD_PL111_CNTL 0x00000018 +#define CLCD_PL111_IENB 0x0000001c +#define CLCD_PL111_RIS 0x00000020 +#define CLCD_PL111_MIS 0x00000024 +#define CLCD_PL111_ICR 0x00000028 +#define CLCD_PL111_UCUR 0x0000002c +#define CLCD_PL111_LCUR 0x00000030 + #define CLCD_PALL 0x00000200 #define CLCD_PALETTE 0x00000200 @@ -147,6 +146,8 @@ struct clcd_fb { struct clcd_board *board; void *board_data; void __iomem *regs; + u16 off_ienb; + u16 off_cntl; u32 clcd_cntl; u32 cmap[16]; }; -- cgit v1.2.3 From 03fb256df9c960b10c0e01b7e92d2f31433675fe Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Tue, 16 Feb 2010 10:48:15 +0000 Subject: ARM: mach-shmobile: G3EVM KEYSC platform data This patch adds KEYSC platform data for the G3EVM board. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/board-g3evm.c | 55 +++++++++++++++++++++++++++++++++++ arch/arm/mach-shmobile/clock-sh7367.c | 7 +++++ 2 files changed, 62 insertions(+) diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c index f36c9a94d326..9d326893a212 100644 --- a/arch/arm/mach-shmobile/board-g3evm.c +++ b/arch/arm/mach-shmobile/board-g3evm.c @@ -29,6 +29,8 @@ #include #include #include +#include +#include #include #include #include @@ -127,9 +129,47 @@ static struct platform_device usb_host_device = { .resource = usb_host_resources, }; +/* KEYSC */ +static struct sh_keysc_info keysc_info = { + .mode = SH_KEYSC_MODE_5, + .scan_timing = 3, + .delay = 100, + .keycodes = { + KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F, KEY_G, + KEY_H, KEY_I, KEY_J, KEY_K, KEY_L, KEY_M, KEY_N, + KEY_O, KEY_P, KEY_Q, KEY_R, KEY_S, KEY_T, KEY_U, + KEY_V, KEY_W, KEY_X, KEY_Y, KEY_Z, KEY_HOME, KEY_SLEEP, + KEY_WAKEUP, KEY_COFFEE, KEY_0, KEY_1, KEY_2, KEY_3, KEY_4, + KEY_5, KEY_6, KEY_7, KEY_8, KEY_9, KEY_STOP, KEY_COMPUTER, + }, +}; + +static struct resource keysc_resources[] = { + [0] = { + .name = "KEYSC", + .start = 0xe61b0000, + .end = 0xe61b000f, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 79, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device keysc_device = { + .name = "sh_keysc", + .num_resources = ARRAY_SIZE(keysc_resources), + .resource = keysc_resources, + .dev = { + .platform_data = &keysc_info, + }, +}; + static struct platform_device *g3evm_devices[] __initdata = { &nor_flash_device, &usb_host_device, + &keysc_device, }; static struct map_desc g3evm_io_desc[] __initdata = { @@ -196,6 +236,21 @@ static void __init g3evm_init(void) __raw_writew(0x6010, 0xe60581c6); /* CGPOSR */ __raw_writew(0x8a0a, 0xe605810c); /* USBCR2 */ + /* KEYSC @ CN7 */ + gpio_request(GPIO_FN_PORT42_KEYOUT0, NULL); + gpio_request(GPIO_FN_PORT43_KEYOUT1, NULL); + gpio_request(GPIO_FN_PORT44_KEYOUT2, NULL); + gpio_request(GPIO_FN_PORT45_KEYOUT3, NULL); + gpio_request(GPIO_FN_PORT46_KEYOUT4, NULL); + gpio_request(GPIO_FN_PORT47_KEYOUT5, NULL); + gpio_request(GPIO_FN_PORT48_KEYIN0_PU, NULL); + gpio_request(GPIO_FN_PORT49_KEYIN1_PU, NULL); + gpio_request(GPIO_FN_PORT50_KEYIN2_PU, NULL); + gpio_request(GPIO_FN_PORT55_KEYIN3_PU, NULL); + gpio_request(GPIO_FN_PORT56_KEYIN4_PU, NULL); + 
gpio_request(GPIO_FN_PORT57_KEYIN5_PU, NULL); + gpio_request(GPIO_FN_PORT58_KEYIN6_PU, NULL); + sh7367_add_standard_devices(); platform_add_devices(g3evm_devices, ARRAY_SIZE(g3evm_devices)); diff --git a/arch/arm/mach-shmobile/clock-sh7367.c b/arch/arm/mach-shmobile/clock-sh7367.c index 58bd54e1113a..bb940c6e4e6c 100644 --- a/arch/arm/mach-shmobile/clock-sh7367.c +++ b/arch/arm/mach-shmobile/clock-sh7367.c @@ -75,6 +75,11 @@ static struct clk usb0_clk = { .name = "usb0", }; +/* a static keysc0 clk for now - enough to get sh_keysc working */ +static struct clk keysc0_clk = { + .name = "keysc0", +}; + static struct clk_lookup lookups[] = { { .clk = &peripheral_clk, @@ -82,6 +87,8 @@ static struct clk_lookup lookups[] = { .clk = &r_clk, }, { .clk = &usb0_clk, + }, { + .clk = &keysc0_clk, } }; -- cgit v1.2.3 From 143f3b833f98271341379d813cb72deb1657a380 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 19 Feb 2010 09:54:06 +0000 Subject: ARM: mach-shmobile: G3EVM FLCTL platform data This patch adds FLCTL platform data for the G3EVM board. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/board-g3evm.c | 67 ++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c index 9d326893a212..9247503296c4 100644 --- a/arch/arm/mach-shmobile/board-g3evm.c +++ b/arch/arm/mach-shmobile/board-g3evm.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -166,10 +167,53 @@ static struct platform_device keysc_device = { }, }; +static struct mtd_partition nand_partition_info[] = { + { + .name = "system", + .offset = 0, + .size = 64 * 1024 * 1024, + }, + { + .name = "userdata", + .offset = MTDPART_OFS_APPEND, + .size = 128 * 1024 * 1024, + }, + { + .name = "cache", + .offset = MTDPART_OFS_APPEND, + .size = 64 * 1024 * 1024, + }, +}; + +static struct resource nand_flash_resources[] = { + [0] = { + .start = 0xe6a30000, + .end = 0xe6a3009b, + .flags = IORESOURCE_MEM, + } +}; + +static struct sh_flctl_platform_data nand_flash_data = { + .parts = nand_partition_info, + .nr_parts = ARRAY_SIZE(nand_partition_info), + .flcmncr_val = QTSEL_E | FCKSEL_E | TYPESEL_SET | NANWF_E + | SHBUSSEL | SEL_16BIT, +}; + +static struct platform_device nand_flash_device = { + .name = "sh_flctl", + .resource = nand_flash_resources, + .num_resources = ARRAY_SIZE(nand_flash_resources), + .dev = { + .platform_data = &nand_flash_data, + }, +}; + static struct platform_device *g3evm_devices[] __initdata = { &nor_flash_device, &usb_host_device, &keysc_device, + &nand_flash_device, }; static struct map_desc g3evm_io_desc[] __initdata = { @@ -251,6 +295,29 @@ static void __init g3evm_init(void) gpio_request(GPIO_FN_PORT57_KEYIN5_PU, NULL); gpio_request(GPIO_FN_PORT58_KEYIN6_PU, NULL); + /* FLCTL */ + gpio_request(GPIO_FN_FCE0, NULL); + gpio_request(GPIO_FN_D0_ED0_NAF0, NULL); + gpio_request(GPIO_FN_D1_ED1_NAF1, NULL); + gpio_request(GPIO_FN_D2_ED2_NAF2, NULL); + gpio_request(GPIO_FN_D3_ED3_NAF3, NULL); + gpio_request(GPIO_FN_D4_ED4_NAF4, NULL); + gpio_request(GPIO_FN_D5_ED5_NAF5, NULL); + gpio_request(GPIO_FN_D6_ED6_NAF6, NULL); + gpio_request(GPIO_FN_D7_ED7_NAF7, NULL); + gpio_request(GPIO_FN_D8_ED8_NAF8, NULL); + gpio_request(GPIO_FN_D9_ED9_NAF9, NULL); + gpio_request(GPIO_FN_D10_ED10_NAF10, NULL); + gpio_request(GPIO_FN_D11_ED11_NAF11, NULL); + gpio_request(GPIO_FN_D12_ED12_NAF12, NULL); + gpio_request(GPIO_FN_D13_ED13_NAF13, NULL); + gpio_request(GPIO_FN_D14_ED14_NAF14, NULL); 
+ gpio_request(GPIO_FN_D15_ED15_NAF15, NULL); + gpio_request(GPIO_FN_WE0_XWR0_FWE, NULL); + gpio_request(GPIO_FN_FRB, NULL); + /* FOE, FCDE, FSC on dedicated pins */ + __raw_writel(__raw_readl(0xe6158048) & ~(1 << 15), 0xe6158048); + sh7367_add_standard_devices(); platform_add_devices(g3evm_devices, ARRAY_SIZE(g3evm_devices)); -- cgit v1.2.3 From 3e862c05ca1bf5bd4cb703bc257d180a4583bc41 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 19 Feb 2010 10:01:22 +0000 Subject: mtd: enable sh_flctl on SH-Mobile ARM Update the Kconfig entry for the sh_flctl driver to enable build on SH-Mobile ARM platforms. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- drivers/mtd/nand/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 1157d5679e66..42e5ea49e975 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -457,7 +457,7 @@ config MTD_NAND_NOMADIK config MTD_NAND_SH_FLCTL tristate "Support for NAND on Renesas SuperH FLCTL" - depends on MTD_NAND && SUPERH + depends on MTD_NAND && (SUPERH || ARCH_SHMOBILE) help Several Renesas SuperH CPU has FLCTL. This option enables support for NAND Flash using FLCTL. -- cgit v1.2.3 From 6676a1701b0b135dacbb7cfeef48004315300df0 Mon Sep 17 00:00:00 2001 From: NISHIMOTO Hiroki Date: Tue, 23 Feb 2010 10:55:10 +0000 Subject: ARM: mach-shmobile: G4EVM KEYSC platform data This patch adds KEYSC platform data for the G4EVM board. Signed-off-by: NISHIMOTO Hiroki Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/board-g4evm.c | 57 ++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c index 5acd623f93e7..10673a90be52 100644 --- a/arch/arm/mach-shmobile/board-g4evm.c +++ b/arch/arm/mach-shmobile/board-g4evm.c @@ -28,6 +28,8 @@ #include #include #include +#include +#include #include #include #include @@ -128,9 +130,49 @@ static struct platform_device usb_host_device = { .resource = usb_host_resources, }; +/* KEYSC */ +static struct sh_keysc_info keysc_info = { + .mode = SH_KEYSC_MODE_5, + .scan_timing = 3, + .delay = 100, + .keycodes = { + KEY_A, KEY_B, KEY_C, KEY_D, KEY_E, KEY_F, + KEY_G, KEY_H, KEY_I, KEY_J, KEY_K, KEY_L, + KEY_M, KEY_N, KEY_U, KEY_P, KEY_Q, KEY_R, + KEY_S, KEY_T, KEY_U, KEY_V, KEY_W, KEY_X, + KEY_Y, KEY_Z, KEY_HOME, KEY_SLEEP, KEY_WAKEUP, KEY_COFFEE, + KEY_0, KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, + KEY_6, KEY_7, KEY_8, KEY_9, KEY_STOP, KEY_COMPUTER, + }, +}; + +static struct resource keysc_resources[] = { + [0] = { + .name = "KEYSC", + .start = 0xe61b0000, + .end = 0xe61b000f, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 79, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device keysc_device = { + .name = "sh_keysc", + .id = 0, /* keysc0 clock */ + .num_resources = ARRAY_SIZE(keysc_resources), + .resource = keysc_resources, + .dev = { + .platform_data = &keysc_info, + }, +}; + static struct platform_device *g4evm_devices[] __initdata = { &nor_flash_device, &usb_host_device, + &keysc_device, }; static struct map_desc g4evm_io_desc[] __initdata = { @@ -196,6 +238,21 @@ static void __init g4evm_init(void) __raw_writew(0x6010, 0xe60581c6); /* CGPOSR */ __raw_writew(0x8a0a, 0xe605810c); /* USBCR2 */ + /* KEYSC @ CN31 */ + gpio_request(GPIO_FN_PORT60_KEYOUT5, NULL); + gpio_request(GPIO_FN_PORT61_KEYOUT4, NULL); + gpio_request(GPIO_FN_PORT62_KEYOUT3, NULL); + gpio_request(GPIO_FN_PORT63_KEYOUT2, NULL); + 
gpio_request(GPIO_FN_PORT64_KEYOUT1, NULL); + gpio_request(GPIO_FN_PORT65_KEYOUT0, NULL); + gpio_request(GPIO_FN_PORT66_KEYIN0_PU, NULL); + gpio_request(GPIO_FN_PORT67_KEYIN1_PU, NULL); + gpio_request(GPIO_FN_PORT68_KEYIN2_PU, NULL); + gpio_request(GPIO_FN_PORT69_KEYIN3_PU, NULL); + gpio_request(GPIO_FN_PORT70_KEYIN4_PU, NULL); + gpio_request(GPIO_FN_PORT71_KEYIN5_PU, NULL); + gpio_request(GPIO_FN_PORT72_KEYIN6_PU, NULL); + sh7377_add_standard_devices(); platform_add_devices(g4evm_devices, ARRAY_SIZE(g4evm_devices)); -- cgit v1.2.3 From 9615b37c5c8fed963811c100053d495c412880fb Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 10 Mar 2010 05:13:12 +0000 Subject: ARM: mach-shmobile: sh7367 SDHI vector merge Merge the SDHI vectors for sh7367 using the recently merged INTC force_enable/disable feature. With this in place SDHI hotplug is supported using the drivers sh_mobile_sdhi and tmio_mmc. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/intc-sh7367.c | 46 ++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/arch/arm/mach-shmobile/intc-sh7367.c b/arch/arm/mach-shmobile/intc-sh7367.c index 6a547b47aabb..5ff70cadfc32 100644 --- a/arch/arm/mach-shmobile/intc-sh7367.c +++ b/arch/arm/mach-shmobile/intc-sh7367.c @@ -27,6 +27,8 @@ enum { UNUSED_INTCA = 0, + ENABLED, + DISABLED, /* interrupt sources INTCA */ IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A, @@ -46,8 +48,8 @@ enum { MSIOF2, MSIOF1, SCIFA4, SCIFA5, SCIFB, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, - SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3, - SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2, SDHI1_SDHI1I3, + SDHI0, + SDHI1, MSU_MSU, MSU_MSU2, IREM, SIU, @@ -59,7 +61,7 @@ enum { TTI20, MISTY, DDM, - SDHI2_SDHI2I0, SDHI2_SDHI2I1, SDHI2_SDHI2I2, SDHI2_SDHI2I3, + SDHI2, RWDT0, RWDT1, DMAC_1_DEI0, DMAC_1_DEI1, DMAC_1_DEI2, DMAC_1_DEI3, DMAC_2_DEI4, DMAC_2_DEI5, DMAC_2_DADERR, @@ -70,7 +72,7 @@ enum { /* interrupt groups INTCA */ DMAC_1, DMAC_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, - ETM11, ARM11, USBHS, FLCTL, IIC1, SDHI0, SDHI1, SDHI2, + ETM11, ARM11, USBHS, FLCTL, IIC1 }; static struct intc_vect intca_vectors[] = { @@ -105,10 +107,10 @@ static struct intc_vect intca_vectors[] = { INTC_VECT(SCIFB, 0x0d60), INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0), INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0), - INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20), - INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60), - INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0), - INTC_VECT(SDHI1_SDHI1I2, 0x0ec0), INTC_VECT(SDHI1_SDHI1I3, 0x0ee0), + INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20), + INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60), + INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0), + INTC_VECT(SDHI1, 0x0ec0), INTC_VECT(SDHI1, 0x0ee0), INTC_VECT(MSU_MSU, 0x0f20), INTC_VECT(MSU_MSU2, 0x0f40), INTC_VECT(IREM, 0x0f60), INTC_VECT(SIU, 0x0fa0), @@ -122,8 +124,8 @@ static struct intc_vect intca_vectors[] = { INTC_VECT(TTI20, 0x1100), INTC_VECT(MISTY, 0x1120), INTC_VECT(DDM, 0x1140), - INTC_VECT(SDHI2_SDHI2I0, 0x1200), INTC_VECT(SDHI2_SDHI2I1, 0x1220), - INTC_VECT(SDHI2_SDHI2I2, 0x1240), INTC_VECT(SDHI2_SDHI2I3, 0x1260), + INTC_VECT(SDHI2, 0x1200), INTC_VECT(SDHI2, 0x1220), + INTC_VECT(SDHI2, 0x1240), INTC_VECT(SDHI2, 0x1260), INTC_VECT(RWDT0, 0x1280), INTC_VECT(RWDT1, 0x12a0), INTC_VECT(DMAC_1_DEI0, 0x2000), INTC_VECT(DMAC_1_DEI1, 
0x2020), INTC_VECT(DMAC_1_DEI2, 0x2040), INTC_VECT(DMAC_1_DEI3, 0x2060), @@ -158,12 +160,6 @@ static struct intc_group intca_groups[] __initdata = { INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1), - INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1, - SDHI0_SDHI0I2, SDHI0_SDHI0I3), - INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1, - SDHI1_SDHI1I2, SDHI1_SDHI1I3), - INTC_GROUP(SDHI2, SDHI2_SDHI2I0, SDHI2_SDHI2I1, - SDHI2_SDHI2I2, SDHI2_SDHI2I3), }; static struct intc_mask_reg intca_mask_registers[] = { @@ -193,10 +189,10 @@ static struct intc_mask_reg intca_mask_registers[] = { { SCIFB, SCIFA5, SCIFA4, MSIOF1, 0, 0, MSIOF2, 0 } }, { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ - { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0, + { DISABLED, DISABLED, ENABLED, ENABLED, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ - { SDHI1_SDHI1I3, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0, + { DISABLED, DISABLED, ENABLED, ENABLED, TTI20, USBDMAC_USHDMI, SPU, SIU } }, { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, @@ -211,7 +207,7 @@ static struct intc_mask_reg intca_mask_registers[] = { { 0, 0, TPU0, TPU1, TPU2, TPU3, TPU4, 0 } }, { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */ - { SDHI2_SDHI2I3, SDHI2_SDHI2I2, SDHI2_SDHI2I1, SDHI2_SDHI2I0, + { DISABLED, DISABLED, ENABLED, ENABLED, MISTY, CMT3, RWDT1, RWDT0 } }, }; @@ -258,10 +254,14 @@ static struct intc_mask_reg intca_ack_registers[] __initdata = { { IRQ8A, IRQ9A, IRQ10A, IRQ11A, IRQ12A, IRQ13A, IRQ14A, IRQ15A } }, }; -static DECLARE_INTC_DESC_ACK(intca_desc, "sh7367-intca", - intca_vectors, intca_groups, - intca_mask_registers, intca_prio_registers, - intca_sense_registers, intca_ack_registers); +static struct intc_desc intca_desc __initdata = { + .name = "sh7367-intca", + .force_enable = ENABLED, + .force_disable = DISABLED, + .hw = INTC_HW_DESC(intca_vectors, intca_groups, + intca_mask_registers, intca_prio_registers, + intca_sense_registers, intca_ack_registers), +}; void __init sh7367_init_irq(void) { -- cgit v1.2.3 From c148abfc2d807b2734e7ecd0e00c71ef7d4b7f42 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 10 Mar 2010 05:15:16 +0000 Subject: ARM: mach-shmobile: sh7377 SDHI vector merge Merge the SDHI vectors for sh7377 using the recently merged INTC force_enable/disable feature. With this in place SDHI hotplug is supported using the drivers sh_mobile_sdhi and tmio_mmc. 
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/intc-sh7377.c | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/arch/arm/mach-shmobile/intc-sh7377.c b/arch/arm/mach-shmobile/intc-sh7377.c index 125021cfba5c..5c781e2d1897 100644 --- a/arch/arm/mach-shmobile/intc-sh7377.c +++ b/arch/arm/mach-shmobile/intc-sh7377.c @@ -27,6 +27,8 @@ enum { UNUSED_INTCA = 0, + ENABLED, + DISABLED, /* interrupt sources INTCA */ IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A, @@ -49,8 +51,8 @@ enum { MSIOF2, MSIOF1, SCIFA4, SCIFA5, SCIFB, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, - SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3, - SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2, SDHI1_SDHI1I3, + SDHI0, + SDHI1, MSU_MSU, MSU_MSU2, IRREM, MSUG, @@ -84,7 +86,7 @@ enum { /* interrupt groups INTCA */ DMAC_1, DMAC_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT, - AP_ARM1, AP_ARM2, USBHS, SPU2, FLCTL, IIC1, SDHI0, SDHI1, + AP_ARM1, AP_ARM2, USBHS, SPU2, FLCTL, IIC1, ICUSB, ICUDMC }; @@ -128,10 +130,10 @@ static struct intc_vect intca_vectors[] = { INTC_VECT(SCIFB, 0x0d60), INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0), INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0), - INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20), - INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60), - INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0), - INTC_VECT(SDHI1_SDHI1I2, 0x0ec0), INTC_VECT(SDHI1_SDHI1I3, 0x0ee0), + INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20), + INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60), + INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0), + INTC_VECT(SDHI1, 0x0ec0), INTC_VECT(SDHI1, 0x0ee0), INTC_VECT(MSU_MSU, 0x0f20), INTC_VECT(MSU_MSU2, 0x0f40), INTC_VECT(IRREM, 0x0f60), INTC_VECT(MSUG, 0x0fa0), @@ -195,10 +197,6 @@ static struct intc_group intca_groups[] __initdata = { INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1), - INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1, - SDHI0_SDHI0I2, SDHI0_SDHI0I3), - INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1, - SDHI1_SDHI1I2, SDHI1_SDHI1I3), INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM), INTC_GROUP(ICUSB, ICUSB_ICUSB0, ICUSB_ICUSB1), INTC_GROUP(ICUDMC, ICUDMC_ICUDMC1, ICUDMC_ICUDMC2), @@ -236,10 +234,10 @@ static struct intc_mask_reg intca_mask_registers[] = { { SCIFB, SCIFA5, SCIFA4, MSIOF1, 0, 0, MSIOF2, 0 } }, { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ - { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0, + { DISABLED, DISABLED, ENABLED, ENABLED, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ - { SDHI1_SDHI1I3, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0, + { DISABLED, DISABLED, ENABLED, ENABLED, TTI20, USBDMAC_USHDMI, 0, MSUG } }, { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, @@ -339,10 +337,14 @@ static struct intc_mask_reg intca_ack_registers[] __initdata = { { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } }, }; -static DECLARE_INTC_DESC_ACK(intca_desc, "sh7377-intca", - intca_vectors, intca_groups, - intca_mask_registers, intca_prio_registers, - intca_sense_registers, intca_ack_registers); +static struct intc_desc intca_desc __initdata = { + .name = "sh7377-intca", + .force_enable = 
ENABLED, + .force_disable = DISABLED, + .hw = INTC_HW_DESC(intca_vectors, intca_groups, + intca_mask_registers, intca_prio_registers, + intca_sense_registers, intca_ack_registers), +}; void __init sh7377_init_irq(void) { -- cgit v1.2.3 From c57a31abf0b469b9cab6810f4e1895bb7ef1c482 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 10 Mar 2010 05:17:24 +0000 Subject: ARM: mach-shmobile: sh7372 SDHI vector merge Merge the SDHI vectors for sh7372 using the recently merged INTC force_enable/disable feature. With this in place SDHI hotplug is supported using the drivers sh_mobile_sdhi and tmio_mmc. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/intc-sh7372.c | 46 ++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c index c57a923f97a6..3ce9d9bd5899 100644 --- a/arch/arm/mach-shmobile/intc-sh7372.c +++ b/arch/arm/mach-shmobile/intc-sh7372.c @@ -27,6 +27,8 @@ enum { UNUSED_INTCA = 0, + ENABLED, + DISABLED, /* interrupt sources INTCA */ IRQ0A, IRQ1A, IRQ2A, IRQ3A, IRQ4A, IRQ5A, IRQ6A, IRQ7A, @@ -47,14 +49,14 @@ enum { MSIOF2, MSIOF1, SCIFA4, SCIFA5, SCIFB, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, - SDHI0_SDHI0I0, SDHI0_SDHI0I1, SDHI0_SDHI0I2, SDHI0_SDHI0I3, - SDHI1_SDHI1I0, SDHI1_SDHI1I1, SDHI1_SDHI1I2, + SDHI0, + SDHI1, IRREM, IRDA, TPU0, TTI20, DDM, - SDHI2_SDHI2I0, SDHI2_SDHI2I1, SDHI2_SDHI2I2, SDHI2_SDHI2I3, + SDHI2, RWDT0, DMAC1_1_DEI0, DMAC1_1_DEI1, DMAC1_1_DEI2, DMAC1_1_DEI3, DMAC1_2_DEI4, DMAC1_2_DEI5, DMAC1_2_DADERR, @@ -82,7 +84,7 @@ enum { /* interrupt groups INTCA */ DMAC1_1, DMAC1_2, DMAC2_1, DMAC2_2, DMAC3_1, DMAC3_2, SHWYSTAT, - AP_ARM1, AP_ARM2, SPU2, FLCTL, IIC1, SDHI0, SDHI1, SDHI2 + AP_ARM1, AP_ARM2, SPU2, FLCTL, IIC1 }; static struct intc_vect intca_vectors[] __initdata = { @@ -123,17 +125,17 @@ static struct intc_vect intca_vectors[] __initdata = { INTC_VECT(SCIFB, 0x0d60), INTC_VECT(FLCTL_FLSTEI, 0x0d80), INTC_VECT(FLCTL_FLTENDI, 0x0da0), INTC_VECT(FLCTL_FLTREQ0I, 0x0dc0), INTC_VECT(FLCTL_FLTREQ1I, 0x0de0), - INTC_VECT(SDHI0_SDHI0I0, 0x0e00), INTC_VECT(SDHI0_SDHI0I1, 0x0e20), - INTC_VECT(SDHI0_SDHI0I2, 0x0e40), INTC_VECT(SDHI0_SDHI0I3, 0x0e60), - INTC_VECT(SDHI1_SDHI1I0, 0x0e80), INTC_VECT(SDHI1_SDHI1I1, 0x0ea0), - INTC_VECT(SDHI1_SDHI1I2, 0x0ec0), + INTC_VECT(SDHI0, 0x0e00), INTC_VECT(SDHI0, 0x0e20), + INTC_VECT(SDHI0, 0x0e40), INTC_VECT(SDHI0, 0x0e60), + INTC_VECT(SDHI1, 0x0e80), INTC_VECT(SDHI1, 0x0ea0), + INTC_VECT(SDHI1, 0x0ec0), INTC_VECT(IRREM, 0x0f60), INTC_VECT(IRDA, 0x0480), INTC_VECT(TPU0, 0x04a0), INTC_VECT(TTI20, 0x1100), INTC_VECT(DDM, 0x1140), - INTC_VECT(SDHI2_SDHI2I0, 0x1200), INTC_VECT(SDHI2_SDHI2I1, 0x1220), - INTC_VECT(SDHI2_SDHI2I2, 0x1240), INTC_VECT(SDHI2_SDHI2I3, 0x1260), + INTC_VECT(SDHI2, 0x1200), INTC_VECT(SDHI2, 0x1220), + INTC_VECT(SDHI2, 0x1240), INTC_VECT(SDHI2, 0x1260), INTC_VECT(RWDT0, 0x1280), INTC_VECT(DMAC1_1_DEI0, 0x2000), INTC_VECT(DMAC1_1_DEI1, 0x2020), INTC_VECT(DMAC1_1_DEI2, 0x2040), INTC_VECT(DMAC1_1_DEI3, 0x2060), @@ -193,12 +195,6 @@ static struct intc_group intca_groups[] __initdata = { INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLTENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), INTC_GROUP(IIC1, IIC1_ALI1, IIC1_TACKI1, IIC1_WAITI1, IIC1_DTEI1), - INTC_GROUP(SDHI0, SDHI0_SDHI0I0, SDHI0_SDHI0I1, - SDHI0_SDHI0I2, SDHI0_SDHI0I3), - INTC_GROUP(SDHI1, SDHI1_SDHI1I0, SDHI1_SDHI1I1, - SDHI1_SDHI1I2), - INTC_GROUP(SDHI2, SDHI2_SDHI2I0, SDHI2_SDHI2I1, - SDHI2_SDHI2I2, 
SDHI2_SDHI2I3), INTC_GROUP(SHWYSTAT, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM), }; @@ -234,10 +230,10 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = { { SCIFB, SCIFA5, SCIFA4, MSIOF1, 0, 0, MSIOF2, 0 } }, { 0xe694009c, 0xe69400dc, 8, /* IMR7A / IMCR7A */ - { SDHI0_SDHI0I3, SDHI0_SDHI0I2, SDHI0_SDHI0I1, SDHI0_SDHI0I0, + { DISABLED, DISABLED, ENABLED, ENABLED, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, { 0xe69400a0, 0xe69400e0, 8, /* IMR8A / IMCR8A */ - { 0, SDHI1_SDHI1I2, SDHI1_SDHI1I1, SDHI1_SDHI1I0, + { 0, DISABLED, ENABLED, ENABLED, TTI20, USBHSDMAC0_USHDMI, 0, 0 } }, { 0xe69400a4, 0xe69400e4, 8, /* IMR9A / IMCR9A */ { CMT1_CMT13, CMT1_CMT12, CMT1_CMT11, CMT1_CMT10, @@ -252,7 +248,7 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = { { 0, 0, TPU0, 0, 0, 0, 0, 0 } }, { 0xe69400b4, 0xe69400f4, 8, /* IMR13A / IMCR13A */ - { SDHI2_SDHI2I3, SDHI2_SDHI2I2, SDHI2_SDHI2I1, SDHI2_SDHI2I0, + { DISABLED, DISABLED, ENABLED, ENABLED, 0, CMT3, 0, RWDT0 } }, { 0xe6950080, 0xe69500c0, 8, /* IMR0A3 / IMCR0A3 */ { SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, 0, @@ -358,10 +354,14 @@ static struct intc_mask_reg intca_ack_registers[] __initdata = { { IRQ24A, IRQ25A, IRQ26A, IRQ27A, IRQ28A, IRQ29A, IRQ30A, IRQ31A } }, }; -static DECLARE_INTC_DESC_ACK(intca_desc, "sh7372-intca", - intca_vectors, intca_groups, - intca_mask_registers, intca_prio_registers, - intca_sense_registers, intca_ack_registers); +static struct intc_desc intca_desc __initdata = { + .name = "sh7372-intca", + .force_enable = ENABLED, + .force_disable = DISABLED, + .hw = INTC_HW_DESC(intca_vectors, intca_groups, + intca_mask_registers, intca_prio_registers, + intca_sense_registers, intca_ack_registers), +}; void __init sh7372_init_irq(void) { -- cgit v1.2.3 From 3a14d0397732b6aaa541348b5a8e8f639ecd02b7 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 10 Mar 2010 09:26:44 +0000 Subject: ARM: mach-shmobile: ap4evb SDHI0 platform data V2 Add SDHI0 platform data for the AP4EVB board V2. 
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/arm/mach-shmobile/board-ap4evb.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index a0463d926447..1c2ec96ce261 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -206,10 +206,32 @@ static struct platform_device keysc_device = { }, }; +/* SDHI0 */ +static struct resource sdhi0_resources[] = { + [0] = { + .name = "SDHI0", + .start = 0xe6850000, + .end = 0xe68501ff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 96, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device sdhi0_device = { + .name = "sh_mobile_sdhi", + .num_resources = ARRAY_SIZE(sdhi0_resources), + .resource = sdhi0_resources, + .id = 0, +}; + static struct platform_device *ap4evb_devices[] __initdata = { &nor_flash_device, &smc911x_device, &keysc_device, + &sdhi0_device, }; static struct map_desc ap4evb_io_desc[] __initdata = { @@ -286,6 +308,16 @@ static void __init ap4evb_init(void) gpio_request(GPIO_FN_KEYIN3_133, NULL); gpio_request(GPIO_FN_KEYIN4, NULL); + /* SDHI0 */ + gpio_request(GPIO_FN_SDHICD0, NULL); + gpio_request(GPIO_FN_SDHIWP0, NULL); + gpio_request(GPIO_FN_SDHICMD0, NULL); + gpio_request(GPIO_FN_SDHICLK0, NULL); + gpio_request(GPIO_FN_SDHID0_3, NULL); + gpio_request(GPIO_FN_SDHID0_2, NULL); + gpio_request(GPIO_FN_SDHID0_1, NULL); + gpio_request(GPIO_FN_SDHID0_0, NULL); + sh7372_add_standard_devices(); platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices)); -- cgit v1.2.3 From 7278a22143b003e9af7b9ca1b5f1c40ae4b55d98 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 10 Mar 2010 11:33:10 +0000 Subject: video: enable sh_mobile_lcdc on SH-Mobile ARM This patch enables the sh_mobile_lcdc driver on SH-Mobile ARM. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- drivers/video/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index dabe804ba575..4be9b4832c55 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -1881,7 +1881,7 @@ config FB_W100 config FB_SH_MOBILE_LCDC tristate "SuperH Mobile LCDC framebuffer support" - depends on FB && SUPERH && HAVE_CLK + depends on FB && (SUPERH || ARCH_SHMOBILE) && HAVE_CLK select FB_SYS_FILLRECT select FB_SYS_COPYAREA select FB_SYS_IMAGEBLIT -- cgit v1.2.3 From 3f7581d66ece6b7ff643c8c817bfbd72cdbe9077 Mon Sep 17 00:00:00 2001 From: Huang Weiyi Date: Fri, 12 Mar 2010 13:05:06 +0000 Subject: serial: sh-sci: remove duplicated #include Remove duplicated #include('s) in drivers/serial/sh-sci.c Signed-off-by: Huang Weiyi Signed-off-by: Paul Mundt --- drivers/serial/sh-sci.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 980f39449ee5..f7b9aff88f4a 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -50,7 +50,6 @@ #include #include #include -#include #ifdef CONFIG_SUPERH #include -- cgit v1.2.3 From c602c8ad45d6ee6ad91fc544513cc96f70790983 Mon Sep 17 00:00:00 2001 From: Vitaliy Kulikov Date: Mon, 15 Mar 2010 09:01:26 +0100 Subject: ALSA: hda - New Intel HDA controller Added a PCI controller id on new Dell laptops. 
Signed-off-by: Vitaliy Kulikov Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_intel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 10bbb534d3ca..926815201885 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2706,6 +2706,7 @@ static struct pci_device_id azx_ids[] = { { PCI_DEVICE(0x8086, 0x3a6e), .driver_data = AZX_DRIVER_ICH }, /* PCH */ { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_ICH }, + { PCI_DEVICE(0x8086, 0x3b57), .driver_data = AZX_DRIVER_ICH }, /* CPT */ { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH }, /* SCH */ -- cgit v1.2.3 From 28d1a85e136985982448b2f9b1342bae85ad1c98 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 15 Mar 2010 09:05:46 +0100 Subject: ALSA: hda - Add an error message for invalid mapping NID Add an error message to snd_hda_add_nid() for invalid mapping NID to make easier to hunt the buggy code. Also added a missing space to the error message in snd_hda_build_controls() Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_codec.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 5bd7cf45f3a5..0e76ac2b2ace 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -1806,6 +1806,8 @@ int snd_hda_add_nid(struct hda_codec *codec, struct snd_kcontrol *kctl, item->nid = nid; return 0; } + printk(KERN_ERR "hda-codec: no NID for mapping control %s:%d:%d\n", + kctl->id.name, kctl->id.index, index); return -EINVAL; } EXPORT_SYMBOL_HDA(snd_hda_add_nid); @@ -2884,7 +2886,7 @@ int /*__devinit*/ snd_hda_build_controls(struct hda_bus *bus) list_for_each_entry(codec, &bus->codec_list, list) { int err = snd_hda_codec_build_controls(codec); if (err < 0) { - printk(KERN_ERR "hda_codec: cannot build controls" + printk(KERN_ERR "hda_codec: cannot build controls " "for #%d (error %d)\n", codec->addr, err); err = snd_hda_codec_reset(codec); if (err < 0) { -- cgit v1.2.3 From 9c4cc0bdede1c39bde60a0d5d9251aac71fbe719 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 15 Mar 2010 09:07:52 +0100 Subject: ALSA: hda - Fix secondary ADC of ALC260 basic model Fix adc_nids[] for ALC260 basic model to match with num_adc_nids. Otherwise you get an invalid NID in the secondary "Input Source" mixer element. Signed-off-by: Takashi Iwai Cc: --- sound/pci/hda/patch_realtek.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 3a8371990d75..ba45868d5242 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6477,7 +6477,7 @@ static struct alc_config_preset alc260_presets[] = { .num_dacs = ARRAY_SIZE(alc260_dac_nids), .dac_nids = alc260_dac_nids, .num_adc_nids = ARRAY_SIZE(alc260_dual_adc_nids), - .adc_nids = alc260_adc_nids, + .adc_nids = alc260_dual_adc_nids, .num_channel_mode = ARRAY_SIZE(alc260_modes), .channel_mode = alc260_modes, .input_mux = &alc260_capture_source, -- cgit v1.2.3 From b43f6e5e258d67acae5961896d10bbe38c271070 Mon Sep 17 00:00:00 2001 From: Anisse Astier Date: Wed, 10 Mar 2010 19:17:46 +0100 Subject: ALSA: hda - Add PCI quirks for MSI NetOn AP1900 and Wind Top AE2220 This should make the speakers and jack detection work on MSI all-in-one computers NetOn AP1900 and Wind Top AE2220. 
Signed-off-by: Anisse Astier Cc: Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index ba45868d5242..07637c4aa46f 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -9195,6 +9195,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = { SND_PCI_QUIRK(0x1462, 0x4314, "MSI", ALC883_TARGA_DIG), SND_PCI_QUIRK(0x1462, 0x4319, "MSI", ALC883_TARGA_DIG), SND_PCI_QUIRK(0x1462, 0x4324, "MSI", ALC883_TARGA_DIG), + SND_PCI_QUIRK(0x1462, 0x4570, "MSI Wind Top AE2220", ALC883_TARGA_DIG), SND_PCI_QUIRK(0x1462, 0x6510, "MSI GX620", ALC883_TARGA_8ch_DIG), SND_PCI_QUIRK(0x1462, 0x6668, "MSI", ALC883_6ST_DIG), SND_PCI_QUIRK(0x1462, 0x7187, "MSI", ALC883_6ST_DIG), @@ -9204,6 +9205,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = { SND_PCI_QUIRK(0x1462, 0x7280, "MSI", ALC883_6ST_DIG), SND_PCI_QUIRK(0x1462, 0x7327, "MSI", ALC883_6ST_DIG), SND_PCI_QUIRK(0x1462, 0x7350, "MSI", ALC883_6ST_DIG), + SND_PCI_QUIRK(0x1462, 0x7437, "MSI NetOn AP1900", ALC883_TARGA_DIG), SND_PCI_QUIRK(0x1462, 0xa422, "MSI", ALC883_TARGA_2ch_DIG), SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG), -- cgit v1.2.3 From 7a410e8d4d97457c8c381e2de9cdc7bd3306badc Mon Sep 17 00:00:00 2001 From: Yoichi Yuasa Date: Wed, 10 Mar 2010 15:57:56 +0900 Subject: pcmcia/vrc4171: use local spinlock for device local lock. struct pcmcia_socket lock had been used before. Signed-off-by: Yoichi Yuasa Signed-off-by: Dominik Brodowski --- drivers/pcmcia/vrc4171_card.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c index c9fcbdc164ea..aaccdb9f4ba1 100644 --- a/drivers/pcmcia/vrc4171_card.c +++ b/drivers/pcmcia/vrc4171_card.c @@ -105,6 +105,7 @@ typedef struct vrc4171_socket { char name[24]; int csc_irq; int io_irq; + spinlock_t lock; } vrc4171_socket_t; static vrc4171_socket_t vrc4171_sockets[CARD_MAX_SLOTS]; @@ -327,7 +328,7 @@ static int pccard_set_socket(struct pcmcia_socket *sock, socket_state_t *state) slot = sock->sock; socket = &vrc4171_sockets[slot]; - spin_lock_irq(&sock->lock); + spin_lock_irq(&socket->lock); voltage = set_Vcc_value(state->Vcc); exca_write_byte(slot, CARD_VOLTAGE_SELECT, voltage); @@ -370,7 +371,7 @@ static int pccard_set_socket(struct pcmcia_socket *sock, socket_state_t *state) cscint |= I365_CSC_DETECT; exca_write_byte(slot, I365_CSCINT, cscint); - spin_unlock_irq(&sock->lock); + spin_unlock_irq(&socket->lock); return 0; } -- cgit v1.2.3 From 7a96e87d6e58a07235a2bc3eff9b093af4937a72 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Sat, 13 Mar 2010 17:42:39 +0100 Subject: pcmcia: pd6729, i82092: use parent (PCI) resources A newly added parent resource entry for the root PCI bus, such as 40000000-ffffffff : PCI Bus #00 means that the pd6729 and i82092 drivers cannot allocate iomem as freely as before, unless they do so as PCI devices. Therefore, set socket->cb_dev so that rsrc_nonstatic.c does the right thing. 
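To illustrate the resource-tree point above with a sketch (not from the patch; the address range and the "example window" name are invented): once an entry like "PCI Bus #00" owns a range in the iomem tree, a request made directly against iomem_resource for a sub-range of it will conflict, while the same request made beneath the bus resource succeeds; setting socket->cb_dev is what lets rsrc_nonstatic.c take the latter path.

  #include <linux/ioport.h>

  /* Hypothetical window inside the range claimed by "PCI Bus #00". */
  static struct resource example_window = {
          .name  = "example window",
          .start = 0x40000000,
          .end   = 0x40000fff,
          .flags = IORESOURCE_MEM,
  };

  static int example_claim(struct resource *pci_bus_res)
  {
          /* conflicts if a child such as "PCI Bus #00" already covers it */
          if (request_resource(&iomem_resource, &example_window) == 0)
                  return 0;

          /* allocating beneath the bridge's parent resource works */
          return request_resource(pci_bus_res, &example_window);
  }
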
Reported-by: Komuro Signed-off-by: Dominik Brodowski --- drivers/pcmcia/i82092.c | 1 + drivers/pcmcia/pd6729.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c index a04f21c8170f..f5da62653313 100644 --- a/drivers/pcmcia/i82092.c +++ b/drivers/pcmcia/i82092.c @@ -133,6 +133,7 @@ static int __devinit i82092aa_pci_probe(struct pci_dev *dev, const struct pci_de sockets[i].socket.map_size = 0x1000; sockets[i].socket.irq_mask = 0; sockets[i].socket.pci_irq = dev->irq; + sockets[i].socket.cb_dev = dev; sockets[i].socket.owner = THIS_MODULE; sockets[i].number = i; diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c index 7c204910a777..7ba57a565cd7 100644 --- a/drivers/pcmcia/pd6729.c +++ b/drivers/pcmcia/pd6729.c @@ -671,6 +671,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev, socket[i].socket.map_size = 0x1000; socket[i].socket.irq_mask = mask; socket[i].socket.pci_irq = dev->irq; + socket[i].socket.cb_dev = dev; socket[i].socket.owner = THIS_MODULE; socket[i].number = i; -- cgit v1.2.3 From b416cd8efb6ce2661f8f98f603972f0b8f796ee4 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Tue, 9 Mar 2010 17:17:36 +0100 Subject: pcmcia: revert "irq probe can be done without risking an IRQ storm" This reverts commit 635416ef393e8cec5a89fc6c1de710ee9596a51e. The argument passed to request_irq() only affects action->flags (IRQF_*), but IRQ_NOAUTOEN relates to desc->status. Reported-by: Jan Beulich CC: Alan Cox Signed-off-by: Dominik Brodowski --- drivers/pcmcia/pcmcia_resource.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index b2df04199a21..ef782c09f029 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c @@ -752,14 +752,6 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req) #ifdef CONFIG_PCMCIA_PROBE -#ifdef IRQ_NOAUTOEN - /* if the underlying IRQ infrastructure allows for it, only allocate - * the IRQ, but do not enable it - */ - if (!(req->Handler)) - type |= IRQ_NOAUTOEN; -#endif /* IRQ_NOAUTOEN */ - if (s->irq.AssignedIRQ != 0) { /* If the interrupt is already assigned, it must be the same */ irq = s->irq.AssignedIRQ; -- cgit v1.2.3 From 28ca8dd71fc170090edca62cb8129625d01b7760 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jens=20K=C3=BCnzer?= Date: Sat, 6 Mar 2010 07:46:16 +0100 Subject: pcmcia: honor saved flags in yenta_socket's I365_CSCINT register Instead of overwriting the I365_CSCINT register, save the old value and merely change the bits we care about. Part 1 of a series to allow the ISA irq to be used for Cardbus devices if the socket's PCI irq is unusable. 
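The change above is the usual read-modify-write idiom for a register shared between two owners, here the CSC IRQ routing in the high nibble and the event enable bits in the low nibble. A minimal sketch, with an invented helper name:

  #include <linux/types.h>

  /* Sketch only: update one field of a register while preserving the rest. */
  static u8 update_field(u8 old, u8 field_mask, u8 new_bits)
  {
          return (old & ~field_mask) | (new_bits & field_mask);
  }

  /*
   * e.g. reg = update_field(exca_readb(socket, I365_CSCINT),
   *                         (u8)~I365_CSC_IRQ_MASK, I365_CSC_DETECT);
   * leaves the ISA IRQ routing in the high nibble untouched.
   */
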
[linux@dominikbrodowski.net: split up the original patch, commit message] Signed-off-by: Jens Kuenzer Signed-off-by: Dominik Brodowski --- drivers/pcmcia/i82365.h | 1 + drivers/pcmcia/yenta_socket.c | 15 +++++++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/pcmcia/i82365.h b/drivers/pcmcia/i82365.h index 849ef1b5d687..3f84d7a2dc84 100644 --- a/drivers/pcmcia/i82365.h +++ b/drivers/pcmcia/i82365.h @@ -95,6 +95,7 @@ #define I365_CSC_DETECT 0x08 #define I365_CSC_ANY 0x0F #define I365_CSC_GPI 0x10 +#define I365_CSC_IRQ_MASK 0xF0 /* Flags for I365_ADDRWIN */ #define I365_ENA_IO(map) (0x40 << (map)) diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index 967c766f53ba..42f6763db400 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -356,7 +356,9 @@ static int yenta_set_socket(struct pcmcia_socket *sock, socket_state_t *state) exca_writeb(socket, I365_POWER, reg); /* CSC interrupt: no ISA irq for CSC */ - reg = I365_CSC_DETECT; + reg = exca_readb(socket, I365_CSCINT); + reg &= I365_CSC_IRQ_MASK; + reg |= I365_CSC_DETECT; if (state->flags & SS_IOCARD) { if (state->csc_mask & SS_STSCHG) reg |= I365_CSC_STSCHG; @@ -912,6 +914,7 @@ static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mas int i; unsigned long val; u32 mask; + u8 reg; /* * Probe for usable interrupts using the force @@ -919,6 +922,7 @@ static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mas */ cb_writel(socket, CB_SOCKET_EVENT, -1); cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK); + reg = exca_readb(socket, I365_CSCINT); exca_writeb(socket, I365_CSCINT, 0); val = probe_irq_on() & isa_irq_mask; for (i = 1; i < 16; i++) { @@ -930,7 +934,7 @@ static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mas cb_writel(socket, CB_SOCKET_EVENT, -1); } cb_writel(socket, CB_SOCKET_MASK, 0); - exca_writeb(socket, I365_CSCINT, 0); + exca_writeb(socket, I365_CSCINT, reg); mask = probe_irq_mask(val) & 0xffff; @@ -967,6 +971,8 @@ static irqreturn_t yenta_probe_handler(int irq, void *dev_id) /* probes the PCI interrupt, use only on override functions */ static int yenta_probe_cb_irq(struct yenta_socket *socket) { + u8 reg; + if (!socket->cb_irq) return -1; @@ -979,7 +985,8 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket) } /* generate interrupt, wait */ - exca_writeb(socket, I365_CSCINT, I365_CSC_STSCHG); + reg = exca_readb(socket, I365_CSCINT); + exca_writeb(socket, I365_CSCINT, reg | I365_CSC_STSCHG); cb_writel(socket, CB_SOCKET_EVENT, -1); cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK); cb_writel(socket, CB_SOCKET_FORCE, CB_FCARDSTS); @@ -988,7 +995,7 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket) /* disable interrupts */ cb_writel(socket, CB_SOCKET_MASK, 0); - exca_writeb(socket, I365_CSCINT, 0); + exca_writeb(socket, I365_CSCINT, reg); cb_writel(socket, CB_SOCKET_EVENT, -1); exca_readb(socket, I365_CSC); -- cgit v1.2.3 From ba8819e991ac507fcbfa080eacdff3e7eea4dc03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jens=20K=C3=BCnzer?= Date: Sat, 6 Mar 2010 08:02:24 +0100 Subject: pcmcia: allow for cb_irq to differ from pci_dev's irq in yenta_socket cb_irq is presumed to be the same as the pci_dev's irq. This won't be true any more as soon as we allow the ISA irq to be used for Cardbus devices. Therefore, use the pci_dev's irq explicitely whenever we care about it. Part 2 of a series to allow the ISA irq to be used for Cardbus devices if the socket's PCI irq is unusable. 
[linux@dominikbrodowski.net: split up the original patch, commit message] Signed-off-by: Jens Kuenzer Signed-off-by: Dominik Brodowski --- drivers/pcmcia/yenta_socket.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index 42f6763db400..51ee68dbc613 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -329,8 +329,8 @@ static int yenta_set_socket(struct pcmcia_socket *sock, socket_state_t *state) /* ISA interrupt control? */ intr = exca_readb(socket, I365_INTCTL); intr = (intr & ~0xf); - if (!socket->cb_irq) { - intr |= state->io_irq; + if (!socket->dev->irq) { + intr |= socket->cb_irq ? socket->cb_irq : state->io_irq; bridge |= CB_BRIDGE_INTR; } exca_writeb(socket, I365_INTCTL, intr); @@ -340,7 +340,7 @@ static int yenta_set_socket(struct pcmcia_socket *sock, socket_state_t *state) reg = exca_readb(socket, I365_INTCTL) & (I365_RING_ENA | I365_INTR_ENA); reg |= (state->flags & SS_RESET) ? 0 : I365_PC_RESET; reg |= (state->flags & SS_IOCARD) ? I365_PC_IOCARD : 0; - if (state->io_irq != socket->cb_irq) { + if (state->io_irq != socket->dev->irq) { reg |= state->io_irq; bridge |= CB_BRIDGE_INTR; } -- cgit v1.2.3 From 0d3a940de51c47a3d6322537c8dce925db755477 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jens=20K=C3=BCnzer?= Date: Sat, 6 Mar 2010 08:27:22 +0100 Subject: pcmcia: re-route Cardbus IRQ to ISA on ti1130 bridges if necessary As the PCI irq pin of the ti1130 pcmcia bridge is not connected (at least on some old IBM Thinkpad 760ED notebooks), the Cardbus IRQ has to be routed to an ISA irq. Part 3 of a series to allow the ISA irq to be used for Cardbus devices if the socket's PCI irq is unusable. [linux@dominikbrodowski.net: split up the original patch, commit message, cleanup] Signed-off-by: Jens Kuenzer Signed-off-by: Dominik Brodowski --- drivers/pcmcia/ti113x.h | 37 +++++++++++++++++++++++++++++++++++-- drivers/pcmcia/yenta_socket.c | 25 ++++++++++++++----------- 2 files changed, 49 insertions(+), 13 deletions(-) diff --git a/drivers/pcmcia/ti113x.h b/drivers/pcmcia/ti113x.h index aaa70227bfb0..9ffa97d0b16c 100644 --- a/drivers/pcmcia/ti113x.h +++ b/drivers/pcmcia/ti113x.h @@ -296,7 +296,7 @@ static int ti_init(struct yenta_socket *socket) u8 new, reg = exca_readb(socket, I365_INTCTL); new = reg & ~I365_INTR_ENA; - if (socket->cb_irq) + if (socket->dev->irq) new |= I365_INTR_ENA; if (new != reg) exca_writeb(socket, I365_INTCTL, new); @@ -316,14 +316,47 @@ static int ti_override(struct yenta_socket *socket) return 0; } +static void ti113x_use_isa_irq(struct yenta_socket *socket) +{ + int isa_irq = -1; + u8 intctl; + u32 isa_irq_mask = 0; + + if (!isa_probe) + return; + + /* get a free isa int */ + isa_irq_mask = yenta_probe_irq(socket, isa_interrupts); + if (!isa_irq_mask) + return; /* no useable isa irq found */ + + /* choose highest available */ + for (; isa_irq_mask; isa_irq++) + isa_irq_mask >>= 1; + socket->cb_irq = isa_irq; + + exca_writeb(socket, I365_CSCINT, (isa_irq << 4)); + + intctl = exca_readb(socket, I365_INTCTL); + intctl &= ~(I365_INTR_ENA | I365_IRQ_MASK); /* CSC Enable */ + exca_writeb(socket, I365_INTCTL, intctl); + + dev_info(&socket->dev->dev, + "Yenta TI113x: using isa irq %d for CardBus\n", isa_irq); +} + + static int ti113x_override(struct yenta_socket *socket) { u8 cardctl; cardctl = config_readb(socket, TI113X_CARD_CONTROL); cardctl &= ~(TI113X_CCR_PCI_IRQ_ENA | TI113X_CCR_PCI_IREQ | TI113X_CCR_PCI_CSC); - if (socket->cb_irq) + if (socket->dev->irq) 
cardctl |= TI113X_CCR_PCI_IRQ_ENA | TI113X_CCR_PCI_CSC | TI113X_CCR_PCI_IREQ; + else + ti113x_use_isa_irq(socket); + config_writeb(socket, TI113X_CARD_CONTROL, cardctl); return ti_override(socket); diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index 51ee68dbc613..418988ab6edf 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -42,6 +42,18 @@ module_param_string(o2_speedup, o2_speedup, sizeof(o2_speedup), 0444); MODULE_PARM_DESC(o2_speedup, "Use prefetch/burst for O2-bridges: 'on', 'off' " "or 'default' (uses recommended behaviour for the detected bridge)"); +/* + * Only probe "regular" interrupts, don't + * touch dangerous spots like the mouse irq, + * because there are mice that apparently + * get really confused if they get fondled + * too intimately. + * + * Default to 11, 10, 9, 7, 6, 5, 4, 3. + */ +static u32 isa_interrupts = 0x0ef8; + + #define debug(x, s, args...) dev_dbg(&s->dev->dev, x, ##args) /* Don't ask.. */ @@ -54,6 +66,8 @@ MODULE_PARM_DESC(o2_speedup, "Use prefetch/burst for O2-bridges: 'on', 'off' " */ #ifdef CONFIG_YENTA_TI static int yenta_probe_cb_irq(struct yenta_socket *socket); +static unsigned int yenta_probe_irq(struct yenta_socket *socket, + u32 isa_irq_mask); #endif @@ -898,17 +912,6 @@ static struct cardbus_type cardbus_type[] = { }; -/* - * Only probe "regular" interrupts, don't - * touch dangerous spots like the mouse irq, - * because there are mice that apparently - * get really confused if they get fondled - * too intimately. - * - * Default to 11, 10, 9, 7, 6, 5, 4, 3. - */ -static u32 isa_interrupts = 0x0ef8; - static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mask) { int i; -- cgit v1.2.3 From e794c01b7de40d180417eacbd910e8f31f2fafeb Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 15 Mar 2010 11:25:10 +0300 Subject: pcmcia: add important if statement There was a problem introduced in Jul 2008 by: 0e6f9d270840 pcmcia: use pcmcia_loop_config in scsi pcmcia drivers Signed-off-by: Dan Carpenter Signed-off-by: Dominik Brodowski --- drivers/scsi/pcmcia/nsp_cs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index c2341af587a3..021246454872 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -1717,6 +1717,7 @@ static int nsp_cs_config(struct pcmcia_device *link) cfg_mem->data = data; ret = pcmcia_loop_config(link, nsp_cs_config_check, cfg_mem); + if (ret) goto cs_failed; if (link->conf.Attributes & CONF_ENABLE_IRQ) { -- cgit v1.2.3 From b89ebadc3421f8ff85030768ec14144794984877 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 15 Mar 2010 14:29:22 +0000 Subject: ARM: Add final piece to fix XIP decompressor in read-only memory This defines STATIC_RW_DATA, which prevents the read/write malloc management data being declared with a static attribute. 
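For context, this is roughly how an override like STATIC_RW_DATA is consumed by the shared decompressor helpers; a simplified sketch, not the exact kernel source.

  /* If the arch wrapper does not define STATIC_RW_DATA, the decompressors'
   * malloc bookkeeping defaults to static file-scope data.  Defining it to
   * nothing, as this patch does, drops the static attribute so the data can
   * be placed in writable memory instead of the read-only XIP image. */
  #ifndef STATIC_RW_DATA
  #define STATIC_RW_DATA static
  #endif

  STATIC_RW_DATA unsigned long malloc_ptr;
  STATIC_RW_DATA int malloc_count;
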
Signed-off-by: Russell King --- arch/arm/boot/compressed/decompress.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c index 0da382f33157..9c097073ce4c 100644 --- a/arch/arm/boot/compressed/decompress.c +++ b/arch/arm/boot/compressed/decompress.c @@ -11,6 +11,7 @@ extern unsigned long free_mem_end_ptr; extern void error(char *); #define STATIC static +#define STATIC_RW_DATA /* non-static please */ #define ARCH_HAS_DECOMP_WDOG -- cgit v1.2.3 From 75216859d96f66856a0ee78c2de5b02115ff65f3 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Mon, 15 Mar 2010 15:14:50 +0100 Subject: ARM: 5990/1: ARM: use __armv5tej_mmu_cache_flush for V5TEJ instead of __armv4_mmu_cache_flush This got broken with commit 0e056f20 Signed-off-by: Sascha Hauer Signed-off-by: Russell King --- arch/arm/boot/compressed/head.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 535a91daaa53..0f23009170a1 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -742,7 +742,7 @@ proc_types: .word 0x000f0000 W(b) __armv4_mmu_cache_on W(b) __armv4_mmu_cache_off - W(b) __armv4_mmu_cache_flush + W(b) __armv5tej_mmu_cache_flush .word 0x0007b000 @ ARMv6 .word 0x000ff000 -- cgit v1.2.3 From 5d8614cc5d6c5c02b7995ed97303481d4e3a8cc7 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 12 Mar 2010 11:03:58 +0100 Subject: ARM: 5989/1: ARM: KGDB: add support for SMP platforms To support SMP platforms, KGDB requires the architecture backend to implement the kgdb_roundup_cpus function. This patch, taken against 2.6.33, implements the function for ARM based on the MIPS port. Cc: Jason Wessel Cc: Jean-Michel Hautbois Cc: KGDB Mailing List Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/kgdb.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index ba8ccfede964..a5b846b9895d 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -9,6 +9,7 @@ * Authors: George Davis * Deepak Saxena */ +#include #include #include @@ -158,6 +159,18 @@ static struct undef_hook kgdb_compiled_brkpt_hook = { .fn = kgdb_compiled_brk_fn }; +static void kgdb_call_nmi_hook(void *ignored) +{ + kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); +} + +void kgdb_roundup_cpus(unsigned long flags) +{ + local_irq_enable(); + smp_call_function(kgdb_call_nmi_hook, NULL, 0); + local_irq_disable(); +} + /** * kgdb_arch_init - Perform any architecture specific initalization. * -- cgit v1.2.3 From 80c43ed724797627d8f86855248c497a6161a214 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 15 Mar 2010 15:51:53 +0100 Subject: ALSA: hda - Disable MSI for Nvidia controller Judging from the member of enable_msi white-list, Nvidia controller seems to cause troubles with MSI enabled, e.g. boot hang up or other serious issue may come up. It's safer to disable MSI as default for Nvidia controllers again for now. 
Signed-off-by: Takashi Iwai Cc: --- sound/pci/hda/hda_intel.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 926815201885..027d3f4c1c59 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2378,6 +2378,13 @@ static void __devinit check_msi(struct azx *chip) "hda_intel: msi for device %04x:%04x set to %d\n", q->subvendor, q->subdevice, q->value); chip->msi = q->value; + return; + } + + /* NVidia chipsets seem to cause troubles with MSI */ + if (chip->driver_type == AZX_DRIVER_NVIDIA) { + printk(KERN_INFO "hda_intel: Disable MSI for Nvidia chipset\n"); + chip->msi = 0; } } -- cgit v1.2.3 From 98d377a0894e6bcca44eafd4d2eee74e8af4db83 Mon Sep 17 00:00:00 2001 From: TARUISI Hiroaki Date: Wed, 18 Nov 2009 05:42:14 +0000 Subject: Btrfs: add a function to lookup a directory path by following backrefs This will be used by the inode lookup ioctl. Signed-off-by: TARUISI Hiroaki Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 92 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 645a17927a8f..ac2a28f4fa1a 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -48,6 +48,7 @@ #include "print-tree.h" #include "volumes.h" #include "locking.h" +#include "ctree.h" /* Mask out flags that are inappropriate for the given type of inode. */ static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) @@ -743,6 +744,97 @@ out: return ret; } +/* + Search INODE_REFs to identify path name of 'dirid' directory + in a 'tree_id' tree. and sets path name to 'name'. +*/ +static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, + u64 tree_id, u64 dirid, char *name) +{ + struct btrfs_root *root; + struct btrfs_key key; + char *name_stack, *ptr; + int ret = -1; + int slot; + int len; + int total_len = 0; + struct btrfs_inode_ref *iref; + struct extent_buffer *l; + struct btrfs_path *path; + + if (dirid == BTRFS_FIRST_FREE_OBJECTID) { + name[0]='\0'; + return 0; + } + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + name_stack = kzalloc(BTRFS_PATH_NAME_MAX+1, GFP_NOFS); + if (!name_stack) { + btrfs_free_path(path); + return -ENOMEM; + } + + ptr = &name_stack[BTRFS_PATH_NAME_MAX]; + + key.objectid = tree_id; + key.type = BTRFS_ROOT_ITEM_KEY; + key.offset = (u64)-1; + root = btrfs_read_fs_root_no_name(info, &key); + if (IS_ERR(root)) { + printk(KERN_ERR "could not find root %llu\n", tree_id); + return -ENOENT; + } + + key.objectid = dirid; + key.type = BTRFS_INODE_REF_KEY; + key.offset = 0; + + while(1) { + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + + l = path->nodes[0]; + slot = path->slots[0]; + btrfs_item_key_to_cpu(l, &key, slot); + + if (ret > 0 && (key.objectid != dirid || + key.type != BTRFS_INODE_REF_KEY)) + goto out; + + iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref); + len = btrfs_inode_ref_name_len(l, iref); + ptr -= len + 1; + total_len += len + 1; + if (ptr < name_stack) + goto out; + + *(ptr + len) = '/'; + read_extent_buffer(l, ptr,(unsigned long)(iref + 1), len); + + if (key.offset == BTRFS_FIRST_FREE_OBJECTID) + break; + + btrfs_release_path(root, path); + key.objectid = key.offset; + key.offset = 0; + dirid = key.objectid; + + } + if (ptr < name_stack) + goto out; + strncpy(name, ptr, total_len); + name[total_len]='\0'; + ret = 0; +out: + btrfs_free_path(path); + kfree(name_stack); + return ret; +} + static noinline int 
btrfs_ioctl_snap_destroy(struct file *file, void __user *arg) { -- cgit v1.2.3 From ac8e9819d71f907a0532b01b22c26b56bbbcbd21 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Sun, 28 Feb 2010 15:39:26 -0500 Subject: Btrfs: add search and inode lookup ioctls The search ioctl is a generic tool for doing btree searches from userland applications. The first user of the search ioctl is a subvolume listing feature, but we'll also use it to find new files in a subvolume. The search ioctl allows you to specify min and max keys to search for, along with min and max transid. It returns the items along with a header that includes the item key. Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 249 +++++++++++++++++++++++++++++++++++++++++++++++++++---- fs/btrfs/ioctl.h | 66 +++++++++++++++ 2 files changed, 299 insertions(+), 16 deletions(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ac2a28f4fa1a..c6044733198d 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -744,16 +744,206 @@ out: return ret; } +static noinline int key_in_sk(struct btrfs_key *key, + struct btrfs_ioctl_search_key *sk) +{ + if (key->objectid < sk->min_objectid) + return 0; + if (key->offset < sk->min_offset) + return 0; + if (key->type < sk->min_type) + return 0; + if (key->objectid > sk->max_objectid) + return 0; + if (key->type > sk->max_type) + return 0; + if (key->offset > sk->max_offset) + return 0; + return 1; +} + +static noinline int copy_to_sk(struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_key *key, + struct btrfs_ioctl_search_key *sk, + char *buf, + unsigned long *sk_offset, + int *num_found) +{ + u64 found_transid; + struct extent_buffer *leaf; + struct btrfs_ioctl_search_header sh; + unsigned long item_off; + unsigned long item_len; + int nritems; + int i; + int slot; + int found = 0; + int ret = 0; + + leaf = path->nodes[0]; + slot = path->slots[0]; + nritems = btrfs_header_nritems(leaf); + + if (btrfs_header_generation(leaf) > sk->max_transid) { + i = nritems; + goto advance_key; + } + found_transid = btrfs_header_generation(leaf); + + for (i = slot; i < nritems; i++) { + item_off = btrfs_item_ptr_offset(leaf, i); + item_len = btrfs_item_size_nr(leaf, i); + + if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE) + item_len = 0; + + if (sizeof(sh) + item_len + *sk_offset > + BTRFS_SEARCH_ARGS_BUFSIZE) { + ret = 1; + goto overflow; + } + + btrfs_item_key_to_cpu(leaf, key, i); + if (!key_in_sk(key, sk)) + continue; + + sh.objectid = key->objectid; + sh.offset = key->offset; + sh.type = key->type; + sh.len = item_len; + sh.transid = found_transid; + + /* copy search result header */ + memcpy(buf + *sk_offset, &sh, sizeof(sh)); + *sk_offset += sizeof(sh); + + if (item_len) { + char *p = buf + *sk_offset; + /* copy the item */ + read_extent_buffer(leaf, p, + item_off, item_len); + *sk_offset += item_len; + found++; + } + + if (*num_found >= sk->nr_items) + break; + } +advance_key: + if (key->offset < (u64)-1) + key->offset++; + else if (key->type < (u64)-1) + key->type++; + else if (key->objectid < (u64)-1) + key->objectid++; + ret = 0; +overflow: + *num_found += found; + return ret; +} + +static noinline int search_ioctl(struct inode *inode, + struct btrfs_ioctl_search_args *args) +{ + struct btrfs_root *root; + struct btrfs_key key; + struct btrfs_key max_key; + struct btrfs_path *path; + struct btrfs_ioctl_search_key *sk = &args->key; + struct btrfs_fs_info *info = BTRFS_I(inode)->root->fs_info; + int ret; + int num_found = 0; + unsigned long sk_offset = 0; + + path = btrfs_alloc_path(); + if 
(!path) + return -ENOMEM; + + if (sk->tree_id == 0) { + /* search the root of the inode that was passed */ + root = BTRFS_I(inode)->root; + } else { + key.objectid = sk->tree_id; + key.type = BTRFS_ROOT_ITEM_KEY; + key.offset = (u64)-1; + root = btrfs_read_fs_root_no_name(info, &key); + if (IS_ERR(root)) { + printk(KERN_ERR "could not find root %llu\n", + sk->tree_id); + btrfs_free_path(path); + return -ENOENT; + } + } + + key.objectid = sk->min_objectid; + key.type = sk->min_type; + key.offset = sk->min_offset; + + max_key.objectid = sk->max_objectid; + max_key.type = sk->max_type; + max_key.offset = sk->max_offset; + + path->keep_locks = 1; + + while(1) { + ret = btrfs_search_forward(root, &key, &max_key, path, 0, + sk->min_transid); + if (ret != 0) { + if (ret > 0) + ret = 0; + goto err; + } + ret = copy_to_sk(root, path, &key, sk, args->buf, + &sk_offset, &num_found); + btrfs_release_path(root, path); + if (ret || num_found >= sk->nr_items) + break; + + } + ret = 0; +err: + sk->nr_items = num_found; + btrfs_free_path(path); + return ret; +} + +static noinline int btrfs_ioctl_tree_search(struct file *file, + void __user *argp) +{ + struct btrfs_ioctl_search_args *args; + struct inode *inode; + int ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + args = kmalloc(sizeof(*args), GFP_KERNEL); + if (!args) + return -ENOMEM; + + if (copy_from_user(args, argp, sizeof(*args))) { + kfree(args); + return -EFAULT; + } + inode = fdentry(file)->d_inode; + ret = search_ioctl(inode, args); + if (ret == 0 && copy_to_user(argp, args, sizeof(*args))) + ret = -EFAULT; + kfree(args); + return ret; +} + /* - Search INODE_REFs to identify path name of 'dirid' directory - in a 'tree_id' tree. and sets path name to 'name'. -*/ + * Search INODE_REFs to identify path name of 'dirid' directory + * in a 'tree_id' tree. and sets path name to 'name'. 
+ */ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, u64 tree_id, u64 dirid, char *name) { struct btrfs_root *root; struct btrfs_key key; - char *name_stack, *ptr; + char *ptr; int ret = -1; int slot; int len; @@ -771,13 +961,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, if (!path) return -ENOMEM; - name_stack = kzalloc(BTRFS_PATH_NAME_MAX+1, GFP_NOFS); - if (!name_stack) { - btrfs_free_path(path); - return -ENOMEM; - } - - ptr = &name_stack[BTRFS_PATH_NAME_MAX]; + ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX]; key.objectid = tree_id; key.type = BTRFS_ROOT_ITEM_KEY; @@ -802,14 +986,16 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, btrfs_item_key_to_cpu(l, &key, slot); if (ret > 0 && (key.objectid != dirid || - key.type != BTRFS_INODE_REF_KEY)) + key.type != BTRFS_INODE_REF_KEY)) { + ret = -ENOENT; goto out; + } iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref); len = btrfs_inode_ref_name_len(l, iref); ptr -= len + 1; total_len += len + 1; - if (ptr < name_stack) + if (ptr < name) goto out; *(ptr + len) = '/'; @@ -824,14 +1010,41 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, dirid = key.objectid; } - if (ptr < name_stack) + if (ptr < name) goto out; - strncpy(name, ptr, total_len); + memcpy(name, ptr, total_len); name[total_len]='\0'; ret = 0; out: btrfs_free_path(path); - kfree(name_stack); + return ret; +} + +static noinline int btrfs_ioctl_ino_lookup(struct file *file, + void __user *argp) +{ + struct btrfs_ioctl_ino_lookup_args *args; + struct inode *inode; + int ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + args = kmalloc(sizeof(*args), GFP_KERNEL); + if (copy_from_user(args, argp, sizeof(*args))) { + kfree(args); + return -EFAULT; + } + inode = fdentry(file)->d_inode; + + ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info, + args->treeid, args->objectid, + args->name); + + if (ret == 0 && copy_to_user(argp, args, sizeof(*args))) + ret = -EFAULT; + + kfree(args); return ret; } @@ -1430,6 +1643,10 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_trans_start(file); case BTRFS_IOC_TRANS_END: return btrfs_ioctl_trans_end(file); + case BTRFS_IOC_TREE_SEARCH: + return btrfs_ioctl_tree_search(file, argp); + case BTRFS_IOC_INO_LOOKUP: + return btrfs_ioctl_ino_lookup(file, argp); case BTRFS_IOC_SYNC: btrfs_sync_fs(file->f_dentry->d_sb, 1); return 0; diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index bc49914475eb..79c07b104f91 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -30,6 +30,68 @@ struct btrfs_ioctl_vol_args { char name[BTRFS_PATH_NAME_MAX + 1]; }; +#define BTRFS_INO_LOOKUP_PATH_MAX 4080 +struct btrfs_ioctl_ino_lookup_args { + __u64 treeid; + __u64 objectid; + char name[BTRFS_INO_LOOKUP_PATH_MAX]; +}; + +struct btrfs_ioctl_search_key { + /* which root are we searching. 
0 is the tree of tree roots */ + __u64 tree_id; + + /* keys returned will be >= min and <= max */ + __u64 min_objectid; + __u64 max_objectid; + + /* keys returned will be >= min and <= max */ + __u64 min_offset; + __u64 max_offset; + + /* max and min transids to search for */ + __u64 min_transid; + __u64 max_transid; + + /* keys returned will be >= min and <= max */ + __u32 min_type; + __u32 max_type; + + /* + * how many items did userland ask for, and how many are we + * returning + */ + __u32 nr_items; + + /* align to 64 bits */ + __u32 unused; + + /* some extra for later */ + __u64 unused1; + __u64 unused2; + __u64 unused3; + __u64 unused4; +}; + +struct btrfs_ioctl_search_header { + __u64 transid; + __u64 objectid; + __u64 offset; + __u32 type; + __u32 len; +}; + +#define BTRFS_SEARCH_ARGS_BUFSIZE (4096 - sizeof(struct btrfs_ioctl_search_key)) +/* + * the buf is an array of search headers where + * each header is followed by the actual item + * the type field is expanded to 32 bits for alignment + */ +struct btrfs_ioctl_search_args { + struct btrfs_ioctl_search_key key; + char buf[BTRFS_SEARCH_ARGS_BUFSIZE]; +}; + struct btrfs_ioctl_clone_range_args { __s64 src_fd; __u64 src_offset, src_length; @@ -67,4 +129,8 @@ struct btrfs_ioctl_clone_range_args { struct btrfs_ioctl_vol_args) #define BTRFS_IOC_SNAP_DESTROY _IOW(BTRFS_IOCTL_MAGIC, 15, \ struct btrfs_ioctl_vol_args) +#define BTRFS_IOC_TREE_SEARCH _IOWR(BTRFS_IOCTL_MAGIC, 17, \ + struct btrfs_ioctl_search_args) +#define BTRFS_IOC_INO_LOOKUP _IOWR(BTRFS_IOCTL_MAGIC, 18, \ + struct btrfs_ioctl_ino_lookup_args) #endif -- cgit v1.2.3 From 12534832cb7b0abc7369298246e8b7af03b863ca Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 17 Dec 2009 21:32:27 +0000 Subject: Btrfs: make set/get functions for the super compat_ro flags use compat_ro Our set/get functions for compat_ro_flags actually look at compat_flags. This will mess any attempt to use compat flags up. The fix is obvious. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2aa8ec6a0981..abbce4d90c1b 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1842,7 +1842,7 @@ BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block, BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block, compat_flags, 64); BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block, - compat_flags, 64); + compat_ro_flags, 64); BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block, incompat_flags, 64); BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block, -- cgit v1.2.3 From 73f73415caddbc01d9f10c03e0a677d5b3d11569 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 4 Dec 2009 17:38:27 +0000 Subject: Btrfs: change how we mount subvolumes This work is in preperation for being able to set a different root as the default mounting root. There is currently a problem with how we mount subvolumes. We cannot currently mount a subvolume of a subvolume, you can only mount subvolumes/snapshots of the default subvolume. So say you take a snapshot of the default subvolume and call it snap1, and then take a snapshot of snap1 and call it snap2, so now you have / /snap1 /snap1/snap2 as your available volumes. Currently you can only mount / and /snap1, you cannot mount /snap1/snap2. 
To fix this problem instead of passing subvolid= you must pass in subvolid=, where is the tree id that gets spit out via the subvolume listing you get from the subvolume listing patches (btrfs filesystem list). This allows us to mount /, /snap1 and /snap1/snap2 as the root volume. In addition to the above, we also now read the default dir item in the tree root to get the root key that it points to. For now this just points at what has always been the default subvolme, but later on I plan to change it to point at whatever root you want to be the new default root, so you can just set the default mount and not have to mount with -o subvolid=. I tested this out with the above scenario and it worked perfectly. Thanks, mount -o subvol operates inside the selected subvolid. For example: mount -o subvol=snap1,subvolid=256 /dev/xxx /mnt /mnt will have the snap1 directory for the subvolume with id 256. mount -o subvol=snap /dev/xxx /mnt /mnt will be the snap directory of whatever the default subvolume is. Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 2 +- fs/btrfs/export.c | 4 +- fs/btrfs/inode.c | 10 +-- fs/btrfs/relocation.c | 2 +- fs/btrfs/super.c | 172 ++++++++++++++++++++++++++++++++++++++++++-------- fs/btrfs/tree-log.c | 2 +- 6 files changed, 158 insertions(+), 34 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index abbce4d90c1b..07d956977a07 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2335,7 +2335,7 @@ int btrfs_init_cachep(void); void btrfs_destroy_cachep(void); long btrfs_ioctl_trans_end(struct file *file); struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, - struct btrfs_root *root); + struct btrfs_root *root, int *was_new); int btrfs_commit_write(struct file *file, struct page *page, unsigned from, unsigned to); struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index ba5c3fd5ab8c..951ef09b82f4 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -95,7 +95,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); key.offset = 0; - inode = btrfs_iget(sb, &key, root); + inode = btrfs_iget(sb, &key, root, NULL); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto fail; @@ -223,7 +223,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child) key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root)); + dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL)); if (!IS_ERR(dentry)) dentry->d_op = &btrfs_dentry_operations; return dentry; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4deb280f8969..7d10d1ccb0fe 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2153,7 +2153,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) found_key.objectid = found_key.offset; found_key.type = BTRFS_INODE_ITEM_KEY; found_key.offset = 0; - inode = btrfs_iget(root->fs_info->sb, &found_key, root); + inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL); if (IS_ERR(inode)) break; @@ -3687,7 +3687,7 @@ static struct inode *btrfs_iget_locked(struct super_block *s, * Returns in *is_new if the inode was read from disk */ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, - struct btrfs_root *root) + struct btrfs_root *root, int *new) { struct inode *inode; @@ -3702,6 +3702,8 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, 
inode_tree_add(inode); unlock_new_inode(inode); + if (new) + *new = 1; } return inode; @@ -3754,7 +3756,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) return NULL; if (location.type == BTRFS_INODE_ITEM_KEY) { - inode = btrfs_iget(dir->i_sb, &location, root); + inode = btrfs_iget(dir->i_sb, &location, root, NULL); return inode; } @@ -3769,7 +3771,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) else inode = new_simple_dir(dir->i_sb, &location, sub_root); } else { - inode = btrfs_iget(dir->i_sb, &location, sub_root); + inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL); } srcu_read_unlock(&root->fs_info->subvol_srcu, index); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 0109e5606bad..d52759daa53f 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3487,7 +3487,7 @@ static struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, key.objectid = objectid; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(root->fs_info->sb, &key, root); + inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); BUG_ON(IS_ERR(inode) || is_bad_inode(inode)); BTRFS_I(inode)->index_cnt = group->key.objectid; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f8b4521de907..f878337cee6f 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -63,10 +63,10 @@ static void btrfs_put_super(struct super_block *sb) } enum { - Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow, - Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, - Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, - Opt_compress, Opt_compress_force, Opt_notreelog, Opt_ratio, + Opt_degraded, Opt_subvol, Opt_subvolid, Opt_device, Opt_nodatasum, + Opt_nodatacow, Opt_max_extent, Opt_max_inline, Opt_alloc_start, + Opt_nobarrier, Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, + Opt_noacl, Opt_compress, Opt_compress_force, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, Opt_err, }; @@ -74,6 +74,7 @@ enum { static match_table_t tokens = { {Opt_degraded, "degraded"}, {Opt_subvol, "subvol=%s"}, + {Opt_subvolid, "subvolid=%d"}, {Opt_device, "device=%s"}, {Opt_nodatasum, "nodatasum"}, {Opt_nodatacow, "nodatacow"}, @@ -157,6 +158,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) btrfs_set_opt(info->mount_opt, DEGRADED); break; case Opt_subvol: + case Opt_subvolid: case Opt_device: /* * These are parsed by btrfs_parse_early_options @@ -292,12 +294,13 @@ out: * only when we need to allocate a new super block. 
*/ static int btrfs_parse_early_options(const char *options, fmode_t flags, - void *holder, char **subvol_name, + void *holder, char **subvol_name, u64 *subvol_objectid, struct btrfs_fs_devices **fs_devices) { substring_t args[MAX_OPT_ARGS]; char *opts, *p; int error = 0; + int intarg; if (!options) goto out; @@ -320,6 +323,12 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, case Opt_subvol: *subvol_name = match_strdup(&args[0]); break; + case Opt_subvolid: + intarg = 0; + match_int(&args[0], &intarg); + if (intarg) + *subvol_objectid = intarg; + break; case Opt_device: error = btrfs_scan_one_device(match_strdup(&args[0]), flags, holder, fs_devices); @@ -347,6 +356,110 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, return error; } +static struct dentry *get_default_root(struct super_block *sb, + u64 subvol_objectid) +{ + struct btrfs_root *root = sb->s_fs_info; + struct btrfs_root *new_root; + struct btrfs_dir_item *di; + struct btrfs_path *path; + struct btrfs_key location; + struct inode *inode; + struct dentry *dentry; + u64 dir_id; + int new = 0; + + /* + * We have a specific subvol we want to mount, just setup location and + * go look up the root. + */ + if (subvol_objectid) { + location.objectid = subvol_objectid; + location.type = BTRFS_ROOT_ITEM_KEY; + location.offset = (u64)-1; + goto find_root; + } + + path = btrfs_alloc_path(); + if (!path) + return ERR_PTR(-ENOMEM); + path->leave_spinning = 1; + + /* + * Find the "default" dir item which points to the root item that we + * will mount by default if we haven't been given a specific subvolume + * to mount. + */ + dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); + di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0); + if (!di) { + /* + * Ok the default dir item isn't there. This is weird since + * it's always been there, but don't freak out, just try and + * mount to root most subvolume. + */ + btrfs_free_path(path); + dir_id = BTRFS_FIRST_FREE_OBJECTID; + new_root = root->fs_info->fs_root; + goto setup_root; + } + + btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); + btrfs_free_path(path); + +find_root: + new_root = btrfs_read_fs_root_no_name(root->fs_info, &location); + if (IS_ERR(new_root)) + return ERR_PTR(PTR_ERR(new_root)); + + if (btrfs_root_refs(&new_root->root_item) == 0) + return ERR_PTR(-ENOENT); + + dir_id = btrfs_root_dirid(&new_root->root_item); +setup_root: + location.objectid = dir_id; + location.type = BTRFS_INODE_ITEM_KEY; + location.offset = 0; + + inode = btrfs_iget(sb, &location, new_root, &new); + if (!inode) + return ERR_PTR(-ENOMEM); + + /* + * If we're just mounting the root most subvol put the inode and return + * a reference to the dentry. We will have already gotten a reference + * to the inode in btrfs_fill_super so we're good to go. + */ + if (!new && sb->s_root->d_inode == inode) { + iput(inode); + return dget(sb->s_root); + } + + if (new) { + const struct qstr name = { .name = "/", .len = 1 }; + + /* + * New inode, we need to make the dentry a sibling of s_root so + * everything gets cleaned up properly on unmount. + */ + dentry = d_alloc(sb->s_root, &name); + if (!dentry) { + iput(inode); + return ERR_PTR(-ENOMEM); + } + d_splice_alias(inode, dentry); + } else { + /* + * We found the inode in cache, just find a dentry for it and + * put the reference to the inode we just got. 
+ */ + dentry = d_find_alias(inode); + iput(inode); + } + + return dentry; +} + static int btrfs_fill_super(struct super_block *sb, struct btrfs_fs_devices *fs_devices, void *data, int silent) @@ -380,7 +493,7 @@ static int btrfs_fill_super(struct super_block *sb, key.objectid = BTRFS_FIRST_FREE_OBJECTID; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(sb, &key, tree_root->fs_info->fs_root); + inode = btrfs_iget(sb, &key, tree_root->fs_info->fs_root, NULL); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto fail_close; @@ -392,12 +505,6 @@ static int btrfs_fill_super(struct super_block *sb, err = -ENOMEM; goto fail_close; } -#if 0 - /* this does the super kobj at the same time */ - err = btrfs_sysfs_add_super(tree_root->fs_info); - if (err) - goto fail_close; -#endif sb->s_root = root_dentry; @@ -489,19 +596,22 @@ static int btrfs_test_super(struct super_block *s, void *data) static int btrfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { - char *subvol_name = NULL; struct block_device *bdev = NULL; struct super_block *s; struct dentry *root; struct btrfs_fs_devices *fs_devices = NULL; fmode_t mode = FMODE_READ; + char *subvol_name = NULL; + u64 subvol_objectid = 0; int error = 0; + int found = 0; if (!(flags & MS_RDONLY)) mode |= FMODE_WRITE; error = btrfs_parse_early_options(data, mode, fs_type, - &subvol_name, &fs_devices); + &subvol_name, &subvol_objectid, + &fs_devices); if (error) return error; @@ -530,6 +640,7 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, goto error_close_devices; } + found = 1; btrfs_close_devices(fs_devices); } else { char b[BDEVNAME_SIZE]; @@ -547,25 +658,35 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, s->s_flags |= MS_ACTIVE; } - if (!strcmp(subvol_name, ".")) - root = dget(s->s_root); - else { - mutex_lock(&s->s_root->d_inode->i_mutex); - root = lookup_one_len(subvol_name, s->s_root, + root = get_default_root(s, subvol_objectid); + if (IS_ERR(root)) { + error = PTR_ERR(root); + deactivate_locked_super(s); + goto error; + } + /* if they gave us a subvolume name bind mount into that */ + if (strcmp(subvol_name, ".")) { + struct dentry *new_root; + mutex_lock(&root->d_inode->i_mutex); + new_root = lookup_one_len(subvol_name, root, strlen(subvol_name)); - mutex_unlock(&s->s_root->d_inode->i_mutex); + mutex_unlock(&root->d_inode->i_mutex); - if (IS_ERR(root)) { + if (IS_ERR(new_root)) { deactivate_locked_super(s); - error = PTR_ERR(root); - goto error_free_subvol_name; + error = PTR_ERR(new_root); + dput(root); + goto error_close_devices; } - if (!root->d_inode) { + if (!new_root->d_inode) { dput(root); + dput(new_root); deactivate_locked_super(s); error = -ENXIO; - goto error_free_subvol_name; + goto error_close_devices; } + dput(root); + root = new_root; } mnt->mnt_sb = s; @@ -580,6 +701,7 @@ error_close_devices: btrfs_close_devices(fs_devices); error_free_subvol_name: kfree(subvol_name); +error: return error; } diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 4a9434b622ec..1255fcc8ade5 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -445,7 +445,7 @@ static noinline struct inode *read_one_inode(struct btrfs_root *root, key.objectid = objectid; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - inode = btrfs_iget(root->fs_info->sb, &key, root); + inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); if (IS_ERR(inode)) { inode = NULL; } else if (is_bad_inode(inode)) { -- cgit v1.2.3 From 
6ef5ed0d386be5c43ec66d6f2999919c0893558b Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 11 Dec 2009 21:11:29 +0000 Subject: Btrfs: add ioctl and incompat flag to set the default mount subvol This patch needs to go along with my previous patch. This lets us set the default dir item's location to whatever root we want to use as our default mounting subvol. With this we don't have to use mount -o subvol= anymore to mount a different subvol, we can just set the new one and it will just magically work. I've done some moderate testing with this, mostly just switching the default mount around, mounting subvols and the default mount at the same time and such, everything seems to work. Thanks, Older kernels would generally be able to still mount the filesystem with the default subvolume set, but it would result in a different volume being mounted, which could be an even more unpleasant suprise for users. So if you set your default subvolume, you can't go back to older kernels. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 4 ++- fs/btrfs/ioctl.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/ioctl.h | 2 ++ 3 files changed, 80 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 07d956977a07..1166b15e9bf6 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -373,11 +373,13 @@ struct btrfs_super_block { * ones specified below then we will fail to mount */ #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0) +#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (2ULL << 0) #define BTRFS_FEATURE_COMPAT_SUPP 0ULL #define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL #define BTRFS_FEATURE_INCOMPAT_SUPP \ - BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF + (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \ + BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL) /* * A leaf is full of items. 
offset and size tell us where to find diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index c6044733198d..7875a75315d0 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1579,6 +1579,79 @@ out: return ret; } +static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) +{ + struct inode *inode = fdentry(file)->d_inode; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *new_root; + struct btrfs_dir_item *di; + struct btrfs_trans_handle *trans; + struct btrfs_path *path; + struct btrfs_key location; + struct btrfs_disk_key disk_key; + struct btrfs_super_block *disk_super; + u64 features; + u64 objectid = 0; + u64 dir_id; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&objectid, argp, sizeof(objectid))) + return -EFAULT; + + if (!objectid) + objectid = root->root_key.objectid; + + location.objectid = objectid; + location.type = BTRFS_ROOT_ITEM_KEY; + location.offset = (u64)-1; + + new_root = btrfs_read_fs_root_no_name(root->fs_info, &location); + if (IS_ERR(new_root)) + return PTR_ERR(new_root); + + if (btrfs_root_refs(&new_root->root_item) == 0) + return -ENOENT; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + path->leave_spinning = 1; + + trans = btrfs_start_transaction(root, 1); + if (!trans) { + btrfs_free_path(path); + return -ENOMEM; + } + + dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); + di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path, + dir_id, "default", 7, 1); + if (!di) { + btrfs_free_path(path); + btrfs_end_transaction(trans, root); + printk(KERN_ERR "Umm, you don't have the default dir item, " + "this isn't going to work\n"); + return -ENOENT; + } + + btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key); + btrfs_set_dir_item_key(path->nodes[0], di, &disk_key); + btrfs_mark_buffer_dirty(path->nodes[0]); + btrfs_free_path(path); + + disk_super = &root->fs_info->super_copy; + features = btrfs_super_incompat_flags(disk_super); + if (!(features & BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL)) { + features |= BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL; + btrfs_set_super_incompat_flags(disk_super, features); + } + btrfs_end_transaction(trans, root); + + return 0; +} + /* * there are many ways the trans_start and trans_end ioctls can lead * to deadlocks. They should only be used by applications that @@ -1625,6 +1698,8 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_snap_create(file, argp, 1); case BTRFS_IOC_SNAP_DESTROY: return btrfs_ioctl_snap_destroy(file, argp); + case BTRFS_IOC_DEFAULT_SUBVOL: + return btrfs_ioctl_default_subvol(file, argp); case BTRFS_IOC_DEFRAG: return btrfs_ioctl_defrag(file); case BTRFS_IOC_RESIZE: diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 79c07b104f91..f1923e0260e3 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -1,3 +1,4 @@ + /* * Copyright (C) 2007 Oracle. All rights reserved. 
* @@ -133,4 +134,5 @@ struct btrfs_ioctl_clone_range_args { struct btrfs_ioctl_search_args) #define BTRFS_IOC_INO_LOOKUP _IOWR(BTRFS_IOCTL_MAGIC, 18, \ struct btrfs_ioctl_ino_lookup_args) +#define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, u64) #endif -- cgit v1.2.3 From 4849f01d153be0f52b8191ee1be0ce492aa96811 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 14 Dec 2009 19:18:38 +0000 Subject: Btrfs: make subvolid=0 mount the original default root Since theres not a good way to make sure the user sees the original default root tree id, and not to mention it's 5 so is way different than any other volume, just make subvol=0 mount the original default root. This makes it a bit easier for users to handle in the long run. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/super.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f878337cee6f..9771eb8694b6 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -325,9 +325,15 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, break; case Opt_subvolid: intarg = 0; - match_int(&args[0], &intarg); - if (intarg) - *subvol_objectid = intarg; + error = match_int(&args[0], &intarg); + if (!error) { + /* we want the original fs_tree */ + if (!intarg) + *subvol_objectid = + BTRFS_FS_TREE_OBJECTID; + else + *subvol_objectid = intarg; + } break; case Opt_device: error = btrfs_scan_one_device(match_strdup(&args[0]), -- cgit v1.2.3 From 51684082b11c304829ea22193d4d96a5b1663b97 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 10 Mar 2010 15:33:32 -0500 Subject: Btrfs: run the backing dev more often in the submit_bio helper The submit_bio helper thread can decide to loop back around to service more bios. This commit forces it to unplug first, which helps reduce the latency seen by submitters. Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 41ecbb2347f2..ace2e8d7bbcd 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -325,16 +325,6 @@ loop_lock: num_sync_run = 0; blk_run_backing_dev(bdi, NULL); } - - cond_resched(); - if (again) - goto loop; - - spin_lock(&device->io_lock); - if (device->pending_bios.head || device->pending_sync_bios.head) - goto loop_lock; - spin_unlock(&device->io_lock); - /* * IO has already been through a long path to get here. Checksumming, * async helper threads, perhaps compression. We've done a pretty @@ -346,6 +336,16 @@ loop_lock: * cared about found its way down here. */ blk_run_backing_dev(bdi, NULL); + + cond_resched(); + if (again) + goto loop; + + spin_lock(&device->io_lock); + if (device->pending_bios.head || device->pending_sync_bios.head) + goto loop_lock; + spin_unlock(&device->io_lock); + done: return 0; } -- cgit v1.2.3 From 940100a4a7b78b27e60a3e72340fb9b5397dcdb2 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 10 Mar 2010 10:52:59 -0500 Subject: Btrfs: be more selective in the defrag ioctl The btrfs defrag ioctl had some bugs around delalloc accounting, and it wasn't properly skipping pages that were not in the mapping. It wasn't properly clearing the page checked flag, which could make the writeback code ignore the page forever while pinning it as dirty. This commit fixes those problems and makes defrag a little smarter. It skips holes and it doesn't waste time defragging large extents. 
If a tiny extent comes before a very large extent, it will defrag both of them to make sure the tiny extent ends up next to something big. Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 150 +++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 140 insertions(+), 10 deletions(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 7875a75315d0..3a89cd77f307 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -475,6 +475,73 @@ out_unlock: return error; } +static int should_defrag_range(struct inode *inode, u64 start, u64 len, + u64 *last_len, u64 *skip, u64 *defrag_end) +{ + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct extent_map *em = NULL; + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + int ret = 1; + + /* + * make sure that once we start defragging and extent, we keep on + * defragging it + */ + if (start < *defrag_end) + return 1; + + *skip = 0; + + /* + * hopefully we have this extent in the tree already, try without + * the full extent lock + */ + read_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, start, len); + read_unlock(&em_tree->lock); + + if (!em) { + /* get the big lock and read metadata off disk */ + lock_extent(io_tree, start, start + len - 1, GFP_NOFS); + em = btrfs_get_extent(inode, NULL, 0, start, len, 0); + unlock_extent(io_tree, start, start + len - 1, GFP_NOFS); + + if (!em) + return 0; + } + + /* this will cover holes, and inline extents */ + if (em->block_start >= EXTENT_MAP_LAST_BYTE) + ret = 0; + + /* + * we hit a real extent, if it is big don't bother defragging it again + */ + if ((*last_len == 0 || *last_len >= 256 * 1024) && + em->len >= 256 * 1024) + ret = 0; + + /* + * last_len ends up being a counter of how many bytes we've defragged. + * every time we choose not to defrag an extent, we reset *last_len + * so that the next tiny extent will force a defrag. + * + * The end result of this is that tiny extents before a single big + * extent will force at least part of that big extent to be defragged. 
+ */ + if (ret) { + *last_len += len; + *defrag_end = extent_map_end(em); + } else { + *last_len = 0; + *skip = extent_map_end(em); + *defrag_end = 0; + } + + free_extent_map(em); + return ret; +} + static int btrfs_defrag_file(struct file *file) { struct inode *inode = fdentry(file)->d_inode; @@ -487,37 +554,86 @@ static int btrfs_defrag_file(struct file *file) unsigned long total_read = 0; u64 page_start; u64 page_end; + u64 last_len = 0; + u64 skip = 0; + u64 defrag_end = 0; unsigned long i; int ret; - ret = btrfs_check_data_free_space(root, inode, inode->i_size); - if (ret) - return -ENOSPC; + if (inode->i_size == 0) + return 0; + + last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; + i = 0; + while (i <= last_index) { + if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, + PAGE_CACHE_SIZE, &last_len, &skip, + &defrag_end)) { + unsigned long next; + /* + * the should_defrag function tells us how much to skip + * bump our counter by the suggested amount + */ + next = (skip + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + i = max(i + 1, next); + continue; + } - mutex_lock(&inode->i_mutex); - last_index = inode->i_size >> PAGE_CACHE_SHIFT; - for (i = 0; i <= last_index; i++) { if (total_read % ra_pages == 0) { btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i, min(last_index, i + ra_pages - 1)); } total_read++; + mutex_lock(&inode->i_mutex); + + ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE); + if (ret) { + ret = -ENOSPC; + break; + } + + ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1); + if (ret) { + btrfs_free_reserved_data_space(root, inode, + PAGE_CACHE_SIZE); + ret = -ENOSPC; + break; + } again: + if (inode->i_size == 0 || + i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) { + ret = 0; + goto err_reservations; + } + page = grab_cache_page(inode->i_mapping, i); if (!page) - goto out_unlock; + goto err_reservations; + if (!PageUptodate(page)) { btrfs_readpage(NULL, page); lock_page(page); if (!PageUptodate(page)) { unlock_page(page); page_cache_release(page); - goto out_unlock; + goto err_reservations; } } + if (page->mapping != inode->i_mapping) { + unlock_page(page); + page_cache_release(page); + goto again; + } + wait_on_page_writeback(page); + if (PageDirty(page)) { + btrfs_free_reserved_data_space(root, inode, + PAGE_CACHE_SIZE); + goto loop_unlock; + } + page_start = (u64)page->index << PAGE_CACHE_SHIFT; page_end = page_start + PAGE_CACHE_SIZE - 1; lock_extent(io_tree, page_start, page_end, GFP_NOFS); @@ -538,18 +654,32 @@ again: * page if it is dirtied again later */ clear_page_dirty_for_io(page); + clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, + page_end, EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, GFP_NOFS); btrfs_set_extent_delalloc(inode, page_start, page_end); + ClearPageChecked(page); set_page_dirty(page); unlock_extent(io_tree, page_start, page_end, GFP_NOFS); + +loop_unlock: unlock_page(page); page_cache_release(page); + mutex_unlock(&inode->i_mutex); + + btrfs_unreserve_metadata_for_delalloc(root, inode, 1); balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1); + i++; } -out_unlock: - mutex_unlock(&inode->i_mutex); return 0; + +err_reservations: + mutex_unlock(&inode->i_mutex); + btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); + btrfs_unreserve_metadata_for_delalloc(root, inode, 1); + return ret; } static noinline int btrfs_ioctl_resize(struct btrfs_root *root, -- cgit v1.2.3 From 1e701a3292e25a6c4939cad9f24951dc6b6ad853 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 11 Mar 2010 09:42:04 
-0500 Subject: Btrfs: add new defrag-range ioctl. The btrfs defrag ioctl was limited to doing the entire file. This commit adds a new interface that can defrag a specific range inside the file. It can also force compression on the file, allowing you to selectively compress individual files after they were created, even when mount -o compress isn't turned on. Signed-off-by: Chris Mason --- fs/btrfs/btrfs_inode.h | 5 +++ fs/btrfs/ctree.h | 1 - fs/btrfs/inode.c | 11 +++++-- fs/btrfs/ioctl.c | 83 ++++++++++++++++++++++++++++++++++++++++++++------ fs/btrfs/ioctl.h | 31 +++++++++++++++++++ 5 files changed, 117 insertions(+), 14 deletions(-) diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 3f1f50d9d916..7a4dee199832 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -153,6 +153,11 @@ struct btrfs_inode { unsigned ordered_data_close:1; unsigned dummy_inode:1; + /* + * always compress this one file + */ + unsigned force_compress:1; + struct inode vfs_inode; }; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 1166b15e9bf6..3a36b1fb553a 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1184,7 +1184,6 @@ struct btrfs_root { #define BTRFS_INODE_NOATIME (1 << 9) #define BTRFS_INODE_DIRSYNC (1 << 10) - /* some macros to generate set/get funcs for the struct fields. This * assumes there is a lefoo_to_cpu for every type, so lets make a simple * one for u8: diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7d10d1ccb0fe..3657925c2461 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -379,7 +379,8 @@ again: * change at any time if we discover bad compression ratios. */ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) && - btrfs_test_opt(root, COMPRESS)) { + (btrfs_test_opt(root, COMPRESS) || + (BTRFS_I(inode)->force_compress))) { WARN_ON(pages); pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); @@ -483,8 +484,10 @@ again: nr_pages_ret = 0; /* flag the file so we don't compress in the future */ - if (!btrfs_test_opt(root, FORCE_COMPRESS)) + if (!btrfs_test_opt(root, FORCE_COMPRESS) && + !(BTRFS_I(inode)->force_compress)) { BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; + } } if (will_compress) { *num_added += 1; @@ -1211,7 +1214,8 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) ret = run_delalloc_nocow(inode, locked_page, start, end, page_started, 0, nr_written); - else if (!btrfs_test_opt(root, COMPRESS)) + else if (!btrfs_test_opt(root, COMPRESS) && + !(BTRFS_I(inode)->force_compress)) ret = cow_file_range(inode, locked_page, start, end, page_started, nr_written, 1); else @@ -3639,6 +3643,7 @@ static noinline void init_btrfs_i(struct inode *inode) bi->index_cnt = (u64)-1; bi->last_unlink_trans = 0; bi->ordered_data_close = 0; + bi->force_compress = 0; extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS); extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping, GFP_NOFS); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 3a89cd77f307..d866b460c26e 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -476,13 +476,18 @@ out_unlock: } static int should_defrag_range(struct inode *inode, u64 start, u64 len, - u64 *last_len, u64 *skip, u64 *defrag_end) + int thresh, u64 *last_len, u64 *skip, + u64 *defrag_end) { struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_map *em = NULL; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; int ret = 1; + + if (thresh == 0) + thresh = 256 * 1024; + /* * make 
sure that once we start defragging and extent, we keep on * defragging it @@ -517,8 +522,7 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, /* * we hit a real extent, if it is big don't bother defragging it again */ - if ((*last_len == 0 || *last_len >= 256 * 1024) && - em->len >= 256 * 1024) + if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh) ret = 0; /* @@ -542,7 +546,8 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, return ret; } -static int btrfs_defrag_file(struct file *file) +static int btrfs_defrag_file(struct file *file, + struct btrfs_ioctl_defrag_range_args *range) { struct inode *inode = fdentry(file)->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; @@ -563,11 +568,19 @@ static int btrfs_defrag_file(struct file *file) if (inode->i_size == 0) return 0; - last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; - i = 0; + if (range->start + range->len > range->start) { + last_index = min_t(u64, inode->i_size - 1, + range->start + range->len - 1) >> PAGE_CACHE_SHIFT; + } else { + last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; + } + + i = range->start >> PAGE_CACHE_SHIFT; while (i <= last_index) { if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, - PAGE_CACHE_SIZE, &last_len, &skip, + PAGE_CACHE_SIZE, + range->extent_thresh, + &last_len, &skip, &defrag_end)) { unsigned long next; /* @@ -585,6 +598,8 @@ static int btrfs_defrag_file(struct file *file) } total_read++; mutex_lock(&inode->i_mutex); + if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) + BTRFS_I(inode)->force_compress = 1; ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE); if (ret) { @@ -673,6 +688,28 @@ loop_unlock: i++; } + if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) + filemap_flush(inode->i_mapping); + + if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) { + /* the filemap_flush will queue IO into the worker threads, but + * we have to make sure the IO is actually started and that + * ordered extents get created before we return + */ + atomic_inc(&root->fs_info->async_submit_draining); + while (atomic_read(&root->fs_info->nr_async_submits) || + atomic_read(&root->fs_info->async_delalloc_pages)) { + wait_event(root->fs_info->async_submit_wait, + (atomic_read(&root->fs_info->nr_async_submits) == 0 && + atomic_read(&root->fs_info->async_delalloc_pages) == 0)); + } + atomic_dec(&root->fs_info->async_submit_draining); + + mutex_lock(&inode->i_mutex); + BTRFS_I(inode)->force_compress = 0; + mutex_unlock(&inode->i_mutex); + } + return 0; err_reservations: @@ -1284,10 +1321,11 @@ out: return err; } -static int btrfs_ioctl_defrag(struct file *file) +static int btrfs_ioctl_defrag(struct file *file, void __user *argp) { struct inode *inode = fdentry(file)->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_ioctl_defrag_range_args *range; int ret; ret = mnt_want_write(file->f_path.mnt); @@ -1308,7 +1346,30 @@ static int btrfs_ioctl_defrag(struct file *file) ret = -EINVAL; goto out; } - btrfs_defrag_file(file); + + range = kzalloc(sizeof(*range), GFP_KERNEL); + if (!range) { + ret = -ENOMEM; + goto out; + } + + if (argp) { + if (copy_from_user(range, argp, + sizeof(*range))) { + ret = -EFAULT; + kfree(range); + } + /* compression requires us to start the IO */ + if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) { + range->flags |= BTRFS_DEFRAG_RANGE_START_IO; + range->extent_thresh = (u32)-1; + } + } else { + /* the rest are all set to zero by kzalloc */ + range->len = (u64)-1; + } + 
btrfs_defrag_file(file, range); + kfree(range); break; } out: @@ -1831,7 +1892,9 @@ long btrfs_ioctl(struct file *file, unsigned int case BTRFS_IOC_DEFAULT_SUBVOL: return btrfs_ioctl_default_subvol(file, argp); case BTRFS_IOC_DEFRAG: - return btrfs_ioctl_defrag(file); + return btrfs_ioctl_defrag(file, NULL); + case BTRFS_IOC_DEFRAG_RANGE: + return btrfs_ioctl_defrag(file, argp); case BTRFS_IOC_RESIZE: return btrfs_ioctl_resize(root, argp); case BTRFS_IOC_ADD_DEV: diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index f1923e0260e3..2d64a65842f3 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -99,6 +99,35 @@ struct btrfs_ioctl_clone_range_args { __u64 dest_offset; }; +/* flags for the defrag range ioctl */ +#define BTRFS_DEFRAG_RANGE_COMPRESS 1 +#define BTRFS_DEFRAG_RANGE_START_IO 2 + +struct btrfs_ioctl_defrag_range_args { + /* start of the defrag operation */ + __u64 start; + + /* number of bytes to defrag, use (u64)-1 to say all */ + __u64 len; + + /* + * flags for the operation, which can include turning + * on compression for this one defrag + */ + __u64 flags; + + /* + * any extent bigger than this will be considered + * already defragged. Use 0 to take the kernel default + * Use 1 to say every single extent must be rewritten + */ + __u32 extent_thresh; + + /* spare for later */ + __u32 unused[5]; +}; + + #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \ struct btrfs_ioctl_vol_args) #define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \ @@ -130,6 +159,8 @@ struct btrfs_ioctl_clone_range_args { struct btrfs_ioctl_vol_args) #define BTRFS_IOC_SNAP_DESTROY _IOW(BTRFS_IOCTL_MAGIC, 15, \ struct btrfs_ioctl_vol_args) +#define BTRFS_IOC_DEFRAG_RANGE _IOW(BTRFS_IOCTL_MAGIC, 16, \ + struct btrfs_ioctl_defrag_range_args) #define BTRFS_IOC_TREE_SEARCH _IOWR(BTRFS_IOCTL_MAGIC, 17, \ struct btrfs_ioctl_search_args) #define BTRFS_IOC_INO_LOOKUP _IOWR(BTRFS_IOCTL_MAGIC, 18, \ -- cgit v1.2.3 From 3a0524dc054791688177544fe510d2868ee20d9f Mon Sep 17 00:00:00 2001 From: TARUISI Hiroaki Date: Tue, 9 Feb 2010 06:36:45 +0000 Subject: btrfs: Update existing btrfs_device for renaming device When we scan devices in a multi-device filesystem, we memorize the original name. If the device gets a new name, later scans don't update the in-kernel structures related to it, and we're not able to mount the filesystem. This patch updates device name during scaning. 
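For context, the scan path that reaches device_list_add() is normally driven from userspace: tools such as 'btrfs device scan' hand each candidate block device path to the kernel one at a time. A minimal, hedged sketch of issuing such a scan, assuming the pre-existing /dev/btrfs-control plus BTRFS_IOC_SCAN_DEV interface (none of which is part of this patch); with the change below, re-scanning a device under a new name updates the memorized path instead of leaving it stale:

    /*
     * Hedged sketch only; error handling trimmed. Assumes the shared
     * ioctl.h (as used by btrfs-progs) providing struct
     * btrfs_ioctl_vol_args and BTRFS_IOC_SCAN_DEV.
     */
    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "ioctl.h"

    static int scan_one_device(const char *path)
    {
            struct btrfs_ioctl_vol_args args;
            int fd, ret;

            fd = open("/dev/btrfs-control", O_RDWR);
            if (fd < 0)
                    return -1;

            memset(&args, 0, sizeof(args));
            strncpy(args.name, path, sizeof(args.name) - 1);

            /* the kernel resolves this path and calls device_list_add() */
            ret = ioctl(fd, BTRFS_IOC_SCAN_DEV, &args);
            close(fd);
            return ret;
    }
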
Signed-off-by: TARUISI Hiroaki Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index ace2e8d7bbcd..eb89e1317aca 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -365,6 +365,7 @@ static noinline int device_list_add(const char *path, struct btrfs_device *device; struct btrfs_fs_devices *fs_devices; u64 found_transid = btrfs_super_generation(disk_super); + char *name; fs_devices = find_fsid(disk_super->fsid); if (!fs_devices) { @@ -411,6 +412,12 @@ static noinline int device_list_add(const char *path, device->fs_devices = fs_devices; fs_devices->num_devices++; + } else if (strcmp(device->name, path)) { + name = kstrdup(path, GFP_NOFS); + if (!name) + return -ENOMEM; + kfree(device->name); + device->name = name; } if (found_transid > fs_devices->latest_trans) { -- cgit v1.2.3 From bd4d10888990f7e3f8029205d27eb155202d6969 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 5 Mar 2010 21:59:21 +0000 Subject: Btrfs: make df be a little bit more understandable The way we report df usage is way confusing for everybody, including some other utilities (bacula for one). So this patch makes df a little bit more understandable. First we make used actually count the total amount of used space in all space info's. This will give us a real view of how much disk space is in use. Second, for blocks available, only count data space. This makes things like bacula work because it says 0 when you can no longer write anymore data to the disk. I think this is a nice compromise, since you will end up with something like the following [root@alpha ~]# df -h Filesystem Size Used Avail Use% Mounted on /dev/mapper/VolGroup-lv_root 148G 30G 111G 21% / /dev/sda1 194M 116M 68M 64% /boot tmpfs 985M 12K 985M 1% /dev/shm /dev/mapper/VolGroup-LogVol02 145G 140G 0 100% /mnt/btrfs-test Compare this with btrfsctl -i output [root@alpha btrfs-progs-unstable]# ./btrfsctl -i /mnt/btrfs-test/ Metadata, DUP: total=4.62GB, used=2.46GB System, DUP: total=8.00MB, used=24.00KB Data: total=134.80GB, used=134.80GB Metadata: total=8.00MB, used=0.00 System: total=4.00MB, used=0.00 operation complete This way we show that there is no more data space to be used, but we have another 5GB of space left for metadata. 
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/super.c | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 9771eb8694b6..ff3dd55f294d 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -752,14 +752,37 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct btrfs_root *root = btrfs_sb(dentry->d_sb); struct btrfs_super_block *disk_super = &root->fs_info->super_copy; + struct list_head *head = &root->fs_info->space_info; + struct btrfs_space_info *found; + u64 total_used = 0; + u64 data_used = 0; int bits = dentry->d_sb->s_blocksize_bits; __be32 *fsid = (__be32 *)root->fs_info->fsid; + rcu_read_lock(); + list_for_each_entry_rcu(found, head, list) { + if (found->flags & (BTRFS_BLOCK_GROUP_DUP| + BTRFS_BLOCK_GROUP_RAID10| + BTRFS_BLOCK_GROUP_RAID1)) { + total_used += found->bytes_used; + if (found->flags & BTRFS_BLOCK_GROUP_DATA) + data_used += found->bytes_used; + else + data_used += found->total_bytes; + } + + total_used += found->bytes_used; + if (found->flags & BTRFS_BLOCK_GROUP_DATA) + data_used += found->bytes_used; + else + data_used += found->total_bytes; + } + rcu_read_unlock(); + buf->f_namelen = BTRFS_NAME_LEN; buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits; - buf->f_bfree = buf->f_blocks - - (btrfs_super_bytes_used(disk_super) >> bits); - buf->f_bavail = buf->f_bfree; + buf->f_bfree = buf->f_blocks - (total_used >> bits); + buf->f_bavail = buf->f_blocks - (data_used >> bits); buf->f_bsize = dentry->d_sb->s_blocksize; buf->f_type = BTRFS_SUPER_MAGIC; -- cgit v1.2.3 From 0bdb1db297ab36865a63ee722d35ff0a1f0ae522 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 19 Feb 2010 14:13:50 -0800 Subject: Btrfs: flush data on snapshot creation Flush any delalloc extents when we create a snapshot, so that recently written file data is always included in the snapshot. A later commit will add the ability to snapshot without the flush, but most people expect flushing. Signed-off-by: Sage Weil Signed-off-by: Chris Mason --- fs/btrfs/transaction.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 2a36e236a492..2d654c1c794d 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -997,13 +997,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, mutex_unlock(&root->fs_info->trans_mutex); - if (flush_on_commit) { + if (flush_on_commit || snap_pending) { btrfs_start_delalloc_inodes(root, 1); ret = btrfs_wait_ordered_extents(root, 0, 1); BUG_ON(ret); - } else if (snap_pending) { - ret = btrfs_wait_ordered_extents(root, 0, 1); - BUG_ON(ret); } /* -- cgit v1.2.3 From 0be2e98173f8badd5ccc7c2e994891746ba1caf4 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Thu, 11 Feb 2010 08:06:58 +0000 Subject: btrfs: fix btrfs_mkdir goto for no free objectids btrfs_mkdir() must jump to the place of ending transaction after btrfs_find_free_objectid() failed. Or this transaction can't end. 
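In other words, the two error labels are not interchangeable: only one of them ends the transaction. A condensed, illustrative view of the error-path layout this fix relies on (labels kept, surrounding code and exact cleanup helpers abbreviated, so this is not the complete btrfs_mkdir()):

    err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
    if (err) {
            err = -ENOSPC;
            /* was "goto out_unlock", which skips ending the transaction */
            goto out_fail;
    }
    /* ... allocate the inode and add the directory entry ... */

    out_fail:
            btrfs_end_transaction_throttle(trans, root);
    out_unlock:
            return err;
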
Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3657925c2461..50ce8840a99b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4508,7 +4508,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid); if (err) { err = -ENOSPC; - goto out_unlock; + goto out_fail; } inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, -- cgit v1.2.3 From 4125bf761cd0786e1163e024c7c809ce2cc625bc Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 3 Feb 2010 18:18:45 +0000 Subject: Btrfs: finish read pages in the order they are submitted The endio is done at reverse order of bio vectors. That means for a sequential read, the page first submitted will finish last in a bio. Considering we will do checksum (making cache hot) for every page, this does introduce delay (and chance to squeeze cache used soon) for pages submitted at the begining. I don't observe obvious performance difference with below patch at my simple test, but seems more natural to finish read in the order they are submitted. Signed-off-by: Shaohua Li Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 7073cbb1b2d4..355a973719a0 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1750,7 +1750,8 @@ static void end_bio_extent_writepage(struct bio *bio, int err) static void end_bio_extent_readpage(struct bio *bio, int err) { int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); - struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; + struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1; + struct bio_vec *bvec = bio->bi_io_vec; struct extent_io_tree *tree; u64 start; u64 end; @@ -1773,7 +1774,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) else whole_page = 0; - if (--bvec >= bio->bi_io_vec) + if (++bvec <= bvec_end) prefetchw(&bvec->bv_page->flags); if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { @@ -1818,7 +1819,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) } check_page_locked(tree, page); } - } while (bvec >= bio->bi_io_vec); + } while (bvec <= bvec_end); bio_put(bio); } -- cgit v1.2.3 From 49958fd7dbb83cd4d65179d025940e01fe1fbacd Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 2 Feb 2010 21:48:28 +0000 Subject: Btrfs: change the ordered tree to use a spinlock instead of a mutex The ordered tree used to need a mutex, but currently all we use it for is to protect the rb_tree, and a spin_lock is just fine for that. Using a spin_lock instead makes dbench run a little faster, 58 mb/s instead of 51 mb/s, and have less latency, 3445.138 ms instead of 3820.633 ms. 
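The conversion is safe because nothing done under the lock needs to sleep: the ordered extent is allocated and initialized before the lock is taken, and the critical section shrinks to the rb-tree manipulation itself. A condensed sketch of the resulting insert path (details trimmed; the full change is in the diff below):

    struct btrfs_ordered_inode_tree {
            spinlock_t lock;        /* was: struct mutex mutex */
            struct rb_root tree;
            struct rb_node *last;
    };

    /* allocation and setup stay outside the lock, so blocking is fine here */
    entry = kzalloc(sizeof(*entry), GFP_NOFS);
    if (!entry)
            return -ENOMEM;
    entry->file_offset = file_offset;
    /* ... fill in the rest of the ordered extent ... */

    spin_lock(&tree->lock);
    node = tree_insert(&tree->tree, file_offset, &entry->rb_node);
    spin_unlock(&tree->lock);
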
Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ordered-data.c | 34 +++++++++++++++++----------------- fs/btrfs/ordered-data.h | 4 ++-- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 5c2a9e78a949..d56f732ba95e 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -174,7 +174,6 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, if (!entry) return -ENOMEM; - mutex_lock(&tree->mutex); entry->file_offset = file_offset; entry->start = start; entry->len = len; @@ -190,16 +189,17 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, INIT_LIST_HEAD(&entry->list); INIT_LIST_HEAD(&entry->root_extent_list); + spin_lock(&tree->lock); node = tree_insert(&tree->tree, file_offset, &entry->rb_node); BUG_ON(node); + spin_unlock(&tree->lock); spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); list_add_tail(&entry->root_extent_list, &BTRFS_I(inode)->root->fs_info->ordered_extents); spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); - mutex_unlock(&tree->mutex); BUG_ON(node); return 0; } @@ -216,9 +216,9 @@ int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_inode_tree *tree; tree = &BTRFS_I(inode)->ordered_tree; - mutex_lock(&tree->mutex); + spin_lock(&tree->lock); list_add_tail(&sum->list, &entry->list); - mutex_unlock(&tree->mutex); + spin_unlock(&tree->lock); return 0; } @@ -240,7 +240,7 @@ int btrfs_dec_test_ordered_pending(struct inode *inode, int ret; tree = &BTRFS_I(inode)->ordered_tree; - mutex_lock(&tree->mutex); + spin_lock(&tree->lock); node = tree_search(tree, file_offset); if (!node) { ret = 1; @@ -264,7 +264,7 @@ int btrfs_dec_test_ordered_pending(struct inode *inode, else ret = 1; out: - mutex_unlock(&tree->mutex); + spin_unlock(&tree->lock); return ret == 0; } @@ -291,7 +291,7 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) /* * remove an ordered extent from the tree. No references are dropped - * and you must wake_up entry->wait. You must hold the tree mutex + * and you must wake_up entry->wait. You must hold the tree lock * while you call this function. 
*/ static int __btrfs_remove_ordered_extent(struct inode *inode, @@ -340,9 +340,9 @@ int btrfs_remove_ordered_extent(struct inode *inode, int ret; tree = &BTRFS_I(inode)->ordered_tree; - mutex_lock(&tree->mutex); + spin_lock(&tree->lock); ret = __btrfs_remove_ordered_extent(inode, entry); - mutex_unlock(&tree->mutex); + spin_unlock(&tree->lock); wake_up(&entry->wait); return ret; @@ -567,7 +567,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, struct btrfs_ordered_extent *entry = NULL; tree = &BTRFS_I(inode)->ordered_tree; - mutex_lock(&tree->mutex); + spin_lock(&tree->lock); node = tree_search(tree, file_offset); if (!node) goto out; @@ -578,7 +578,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, if (entry) atomic_inc(&entry->refs); out: - mutex_unlock(&tree->mutex); + spin_unlock(&tree->lock); return entry; } @@ -594,7 +594,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset) struct btrfs_ordered_extent *entry = NULL; tree = &BTRFS_I(inode)->ordered_tree; - mutex_lock(&tree->mutex); + spin_lock(&tree->lock); node = tree_search(tree, file_offset); if (!node) goto out; @@ -602,7 +602,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset) entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); atomic_inc(&entry->refs); out: - mutex_unlock(&tree->mutex); + spin_unlock(&tree->lock); return entry; } @@ -629,7 +629,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, else offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize); - mutex_lock(&tree->mutex); + spin_lock(&tree->lock); disk_i_size = BTRFS_I(inode)->disk_i_size; /* truncate file */ @@ -735,7 +735,7 @@ out: */ if (ordered) __btrfs_remove_ordered_extent(inode, ordered); - mutex_unlock(&tree->mutex); + spin_unlock(&tree->lock); if (ordered) wake_up(&ordered->wait); return ret; @@ -762,7 +762,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, if (!ordered) return 1; - mutex_lock(&tree->mutex); + spin_lock(&tree->lock); list_for_each_entry_reverse(ordered_sum, &ordered->list, list) { if (disk_bytenr >= ordered_sum->bytenr) { num_sectors = ordered_sum->len / sectorsize; @@ -777,7 +777,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, } } out: - mutex_unlock(&tree->mutex); + spin_unlock(&tree->lock); btrfs_put_ordered_extent(ordered); return ret; } diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 9116c6d0c5a9..bfbcebbb0adc 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -21,7 +21,7 @@ /* one of these per inode */ struct btrfs_ordered_inode_tree { - struct mutex mutex; + spinlock_t lock; struct rb_root tree; struct rb_node *last; }; @@ -128,7 +128,7 @@ static inline int btrfs_ordered_sum_size(struct btrfs_root *root, static inline void btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t) { - mutex_init(&t->mutex); + spin_lock_init(&t->lock); t->tree = RB_ROOT; t->last = NULL; } -- cgit v1.2.3 From c2a128d28a2e78e159e17e8c9274d0a9d9492555 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 2 Feb 2010 21:19:11 +0000 Subject: Btrfs: cache extent state in find_delalloc_range This patch makes us cache the extent state we find in find_delalloc_range since we'll have to lock the extent later on in the function. This will keep us from re-searching for the rang when we try to lock the extent. 
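The idea is simply to hold on to the extent_state the search already found: a reference is taken on it and handed back through a new cached_state argument, so the follow-up lock can start from that state instead of walking the tree again. A hedged sketch of the caller side in find_lock_delalloc_range(), assuming it feeds the cached state straight into lock_extent_bits() (only the relevant calls shown):

    struct extent_state *cached_state = NULL;

    found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
                                max_bytes, &cached_state);
    /* ... */

    /* the lock can reuse cached_state rather than re-searching the tree */
    lock_extent_bits(tree, delalloc_start, delalloc_end, 0,
                     &cached_state, GFP_NOFS);
    /* ... */
    free_extent_state(cached_state);
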
Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 355a973719a0..3c17c9eb0d98 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1171,7 +1171,8 @@ out: * 1 is returned if we find something, 0 if nothing was in the tree */ static noinline u64 find_delalloc_range(struct extent_io_tree *tree, - u64 *start, u64 *end, u64 max_bytes) + u64 *start, u64 *end, u64 max_bytes, + struct extent_state **cached_state) { struct rb_node *node; struct extent_state *state; @@ -1203,8 +1204,11 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree, *end = state->end; goto out; } - if (!found) + if (!found) { *start = state->start; + *cached_state = state; + atomic_inc(&state->refs); + } found++; *end = state->end; cur_start = state->end + 1; @@ -1336,10 +1340,11 @@ again: delalloc_start = *start; delalloc_end = 0; found = find_delalloc_range(tree, &delalloc_start, &delalloc_end, - max_bytes); + max_bytes, &cached_state); if (!found || delalloc_end <= *start) { *start = delalloc_start; *end = delalloc_end; + free_extent_state(cached_state); return found; } -- cgit v1.2.3 From 5a1a3df1f6c86926cfe8657e6f9b4b4c2f467d60 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 2 Feb 2010 20:51:14 +0000 Subject: Btrfs: cache ordered extent when completing io When finishing io we run btrfs_dec_test_ordered_pending, and then immediately run btrfs_lookup_ordered_extent, but btrfs_dec_test_ordered_pending does that already, so we're searching twice when we don't have to. This patch lets us pass a btrfs_ordered_extent in to btrfs_dec_test_ordered_pending so if we do complete io on that ordered extent we can just use the one we found then instead of having to do another btrfs_lookup_ordered_extent. This made my fio job with the other patch go from 24 mb/s to 29 mb/s. Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 5 ++--- fs/btrfs/ordered-data.c | 7 ++++++- fs/btrfs/ordered-data.h | 3 ++- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 50ce8840a99b..1824dda1d351 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1698,11 +1698,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) int compressed = 0; int ret; - ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1); + ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, + end - start + 1); if (!ret) return 0; - - ordered_extent = btrfs_lookup_ordered_extent(inode, start); BUG_ON(!ordered_extent); if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index d56f732ba95e..a8ffecd0b491 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -232,11 +232,12 @@ int btrfs_add_ordered_sum(struct inode *inode, * to make sure this function only returns 1 once for a given ordered extent. 
*/ int btrfs_dec_test_ordered_pending(struct inode *inode, + struct btrfs_ordered_extent **cached, u64 file_offset, u64 io_size) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; - struct btrfs_ordered_extent *entry; + struct btrfs_ordered_extent *entry = NULL; int ret; tree = &BTRFS_I(inode)->ordered_tree; @@ -264,6 +265,10 @@ int btrfs_dec_test_ordered_pending(struct inode *inode, else ret = 1; out: + if (!ret && cached && entry) { + *cached = entry; + atomic_inc(&entry->refs); + } spin_unlock(&tree->lock); return ret == 0; } diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index bfbcebbb0adc..c82f76a9f040 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -137,7 +137,8 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry); int btrfs_remove_ordered_extent(struct inode *inode, struct btrfs_ordered_extent *entry); int btrfs_dec_test_ordered_pending(struct inode *inode, - u64 file_offset, u64 io_size); + struct btrfs_ordered_extent **cached, + u64 file_offset, u64 io_size); int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int tyep); int btrfs_add_ordered_sum(struct inode *inode, -- cgit v1.2.3 From 2ac55d41b5d6bf49e76bc85db5431240617e2f8f Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 3 Feb 2010 19:33:23 +0000 Subject: Btrfs: cache the extent state everywhere we possibly can V2 This patch just goes through and fixes everybody that does lock_extent() blah unlock_extent() to use lock_extent_bits() blah unlock_extent_cached() and pass around a extent_state so we only have to do the searches once per function. This gives me about a 3 mb/s boots on my random write test. I have not converted some things, like the relocation and ioctl's, since they aren't heavily used and the relocation stuff is in the middle of being re-written. I also changed the clear_extent_bit() to only unset the cached state if we are clearing EXTENT_LOCKED and related stuff, so we can do things like this lock_extent_bits() clear delalloc bits unlock_extent_cached() without losing our cached state. I tested this thoroughly and turned on LEAK_DEBUG to make sure we weren't leaking extent states, everything worked out fine. 
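The conversion itself is mechanical: wherever a function locked a range, did some work, and unlocked it, the extent state found by the lock is now carried through to the unlock, so each function pays for at most one tree search. A condensed sketch of the pattern (signatures as declared in the extent_io.h diff below):

    struct extent_state *cached_state = NULL;

    lock_extent_bits(io_tree, start, end, 0, &cached_state, GFP_NOFS);

    /*
     * work on the range; clearing e.g. EXTENT_DIRTY | EXTENT_DELALLOC is
     * fine, because clear_extent_bit() now only drops the cached state
     * when EXTENT_LOCKED-style bits are being cleared
     */

    unlock_extent_cached(io_tree, start, end, &cached_state, GFP_NOFS);
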
Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 3 +- fs/btrfs/disk-io.c | 15 ++++--- fs/btrfs/extent-tree.c | 11 +++-- fs/btrfs/extent_io.c | 61 +++++++++++++++++---------- fs/btrfs/extent_io.h | 10 +++-- fs/btrfs/file.c | 23 ++++++---- fs/btrfs/inode.c | 111 ++++++++++++++++++++++++++++++------------------- fs/btrfs/ioctl.c | 2 +- fs/btrfs/relocation.c | 2 +- 9 files changed, 148 insertions(+), 90 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 3a36b1fb553a..3f704a816137 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2311,7 +2311,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, u32 min_type); int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); -int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end); +int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, + struct extent_state **cached_state); int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc); int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0427183e3e05..11d0ad30e203 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -263,13 +263,15 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, static int verify_parent_transid(struct extent_io_tree *io_tree, struct extent_buffer *eb, u64 parent_transid) { + struct extent_state *cached_state = NULL; int ret; if (!parent_transid || btrfs_header_generation(eb) == parent_transid) return 0; - lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS); - if (extent_buffer_uptodate(io_tree, eb) && + lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, + 0, &cached_state, GFP_NOFS); + if (extent_buffer_uptodate(io_tree, eb, cached_state) && btrfs_header_generation(eb) == parent_transid) { ret = 0; goto out; @@ -282,10 +284,10 @@ static int verify_parent_transid(struct extent_io_tree *io_tree, (unsigned long long)btrfs_header_generation(eb)); } ret = 1; - clear_extent_buffer_uptodate(io_tree, eb); + clear_extent_buffer_uptodate(io_tree, eb, &cached_state); out: - unlock_extent(io_tree, eb->start, eb->start + eb->len - 1, - GFP_NOFS); + unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1, + &cached_state, GFP_NOFS); return ret; } @@ -2497,7 +2499,8 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid) int ret; struct inode *btree_inode = buf->first_page->mapping->host; - ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf); + ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf, + NULL); if (!ret) return ret; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 559f72489b3b..1727b26fb194 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6561,6 +6561,7 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root, struct btrfs_key key; struct inode *inode = NULL; struct btrfs_file_extent_item *fi; + struct extent_state *cached_state = NULL; u64 num_bytes; u64 skip_objectid = 0; u32 nritems; @@ -6589,12 +6590,14 @@ static noinline int invalidate_extent_cache(struct btrfs_root *root, } num_bytes = btrfs_file_extent_num_bytes(leaf, fi); - lock_extent(&BTRFS_I(inode)->io_tree, key.offset, - key.offset + num_bytes - 1, GFP_NOFS); + lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset, + key.offset + num_bytes - 1, 0, &cached_state, + GFP_NOFS); btrfs_drop_extent_cache(inode, key.offset, key.offset + num_bytes - 1, 1); - 
unlock_extent(&BTRFS_I(inode)->io_tree, key.offset, - key.offset + num_bytes - 1, GFP_NOFS); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset, + key.offset + num_bytes - 1, &cached_state, + GFP_NOFS); cond_resched(); } iput(inode); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3c17c9eb0d98..c99121ac5d6b 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -513,7 +513,10 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u64 last_end; int err; int set = 0; + int clear = 0; + if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY)) + clear = 1; again: if (!prealloc && (mask & __GFP_WAIT)) { prealloc = alloc_extent_state(mask); @@ -524,14 +527,20 @@ again: spin_lock(&tree->lock); if (cached_state) { cached = *cached_state; - *cached_state = NULL; - cached_state = NULL; + + if (clear) { + *cached_state = NULL; + cached_state = NULL; + } + if (cached && cached->tree && cached->start == start) { - atomic_dec(&cached->refs); + if (clear) + atomic_dec(&cached->refs); state = cached; goto hit_next; } - free_extent_state(cached); + if (clear) + free_extent_state(cached); } /* * this search will find the extents that end after @@ -946,11 +955,11 @@ int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, } int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask) + struct extent_state **cached_state, gfp_t mask) { return set_extent_bit(tree, start, end, EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE, - 0, NULL, NULL, mask); + 0, NULL, cached_state, mask); } int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, @@ -984,10 +993,11 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, } static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, - u64 end, gfp_t mask) + u64 end, struct extent_state **cached_state, + gfp_t mask) { return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, - NULL, mask); + cached_state, mask); } int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end) @@ -1727,7 +1737,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err) } if (!uptodate) { - clear_extent_uptodate(tree, start, end, GFP_NOFS); + clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS); ClearPageUptodate(page); SetPageError(page); } @@ -2710,6 +2720,7 @@ int extent_readpages(struct extent_io_tree *tree, int extent_invalidatepage(struct extent_io_tree *tree, struct page *page, unsigned long offset) { + struct extent_state *cached_state = NULL; u64 start = ((u64)page->index << PAGE_CACHE_SHIFT); u64 end = start + PAGE_CACHE_SIZE - 1; size_t blocksize = page->mapping->host->i_sb->s_blocksize; @@ -2718,12 +2729,12 @@ int extent_invalidatepage(struct extent_io_tree *tree, if (start > end) return 0; - lock_extent(tree, start, end, GFP_NOFS); + lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS); wait_on_page_writeback(page); clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, - 1, 1, NULL, GFP_NOFS); + 1, 1, &cached_state, GFP_NOFS); return 0; } @@ -2926,16 +2937,17 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock, get_extent_t *get_extent) { struct inode *inode = mapping->host; + struct extent_state *cached_state = NULL; u64 start = iblock << inode->i_blkbits; sector_t sector = 0; size_t blksize = (1 << inode->i_blkbits); struct extent_map *em; - lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, - GFP_NOFS); + 
lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, + 0, &cached_state, GFP_NOFS); em = get_extent(inode, NULL, 0, start, blksize, 0); - unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, - GFP_NOFS); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, + start + blksize - 1, &cached_state, GFP_NOFS); if (!em || IS_ERR(em)) return 0; @@ -2957,6 +2969,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u32 flags = 0; u64 disko = 0; struct extent_map *em = NULL; + struct extent_state *cached_state = NULL; int end = 0; u64 em_start = 0, em_len = 0; unsigned long emflags; @@ -2965,8 +2978,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, if (len == 0) return -EINVAL; - lock_extent(&BTRFS_I(inode)->io_tree, start, start + len, - GFP_NOFS); + lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, + &cached_state, GFP_NOFS); em = get_extent(inode, NULL, 0, off, max - off, 0); if (!em) goto out; @@ -3029,8 +3042,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, out_free: free_extent_map(em); out: - unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len, - GFP_NOFS); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len, + &cached_state, GFP_NOFS); return ret; } @@ -3270,7 +3283,8 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree, } int clear_extent_buffer_uptodate(struct extent_io_tree *tree, - struct extent_buffer *eb) + struct extent_buffer *eb, + struct extent_state **cached_state) { unsigned long i; struct page *page; @@ -3280,7 +3294,7 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree, clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, - GFP_NOFS); + cached_state, GFP_NOFS); for (i = 0; i < num_pages; i++) { page = extent_buffer_page(eb, i); if (page) @@ -3340,7 +3354,8 @@ int extent_range_uptodate(struct extent_io_tree *tree, } int extent_buffer_uptodate(struct extent_io_tree *tree, - struct extent_buffer *eb) + struct extent_buffer *eb, + struct extent_state *cached_state) { int ret = 0; unsigned long num_pages; @@ -3352,7 +3367,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree, return 1; ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1, - EXTENT_UPTODATE, 1, NULL); + EXTENT_UPTODATE, 1, cached_state); if (ret) return ret; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 36de250a7b2b..bbab4813646f 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -163,6 +163,8 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits, struct extent_state **cached, gfp_t mask); int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); +int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, + struct extent_state **cached, gfp_t mask); int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int extent_read_full_page(struct extent_io_tree *tree, struct page *page, @@ -196,7 +198,7 @@ int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask); + struct extent_state **cached_state, gfp_t mask); int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, 
gfp_t mask); int find_first_extent_bit(struct extent_io_tree *tree, u64 start, @@ -281,9 +283,11 @@ int test_extent_buffer_dirty(struct extent_io_tree *tree, int set_extent_buffer_uptodate(struct extent_io_tree *tree, struct extent_buffer *eb); int clear_extent_buffer_uptodate(struct extent_io_tree *tree, - struct extent_buffer *eb); + struct extent_buffer *eb, + struct extent_state **cached_state); int extent_buffer_uptodate(struct extent_io_tree *tree, - struct extent_buffer *eb); + struct extent_buffer *eb, + struct extent_state *cached_state); int map_extent_buffer(struct extent_buffer *eb, unsigned long offset, unsigned long min_len, char **token, char **map, unsigned long *map_start, diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index a7fd9f3a750a..d146dde7efb6 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -123,7 +123,8 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans, root->sectorsize - 1) & ~((u64)root->sectorsize - 1); end_of_last_block = start_pos + num_bytes - 1; - err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block); + err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, + NULL); if (err) return err; @@ -753,6 +754,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file, loff_t pos, unsigned long first_index, unsigned long last_index, size_t write_bytes) { + struct extent_state *cached_state = NULL; int i; unsigned long index = pos >> PAGE_CACHE_SHIFT; struct inode *inode = fdentry(file)->d_inode; @@ -781,16 +783,18 @@ again: } if (start_pos < inode->i_size) { struct btrfs_ordered_extent *ordered; - lock_extent(&BTRFS_I(inode)->io_tree, - start_pos, last_pos - 1, GFP_NOFS); + lock_extent_bits(&BTRFS_I(inode)->io_tree, + start_pos, last_pos - 1, 0, &cached_state, + GFP_NOFS); ordered = btrfs_lookup_first_ordered_extent(inode, last_pos - 1); if (ordered && ordered->file_offset + ordered->len > start_pos && ordered->file_offset < last_pos) { btrfs_put_ordered_extent(ordered); - unlock_extent(&BTRFS_I(inode)->io_tree, - start_pos, last_pos - 1, GFP_NOFS); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, + start_pos, last_pos - 1, + &cached_state, GFP_NOFS); for (i = 0; i < num_pages; i++) { unlock_page(pages[i]); page_cache_release(pages[i]); @@ -802,12 +806,13 @@ again: if (ordered) btrfs_put_ordered_extent(ordered); - clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos, + clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, + EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, GFP_NOFS); - unlock_extent(&BTRFS_I(inode)->io_tree, - start_pos, last_pos - 1, GFP_NOFS); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, + start_pos, last_pos - 1, &cached_state, + GFP_NOFS); } for (i = 0; i < num_pages; i++) { clear_page_dirty_for_io(pages[i]); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 1824dda1d351..2a337a09c650 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -573,8 +573,8 @@ retry: unsigned long nr_written = 0; lock_extent(io_tree, async_extent->start, - async_extent->start + - async_extent->ram_size - 1, GFP_NOFS); + async_extent->start + + async_extent->ram_size - 1, GFP_NOFS); /* allocate blocks */ ret = cow_file_range(inode, async_cow->locked_page, @@ -1512,12 +1512,13 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, return 0; } -int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end) +int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, + 
struct extent_state **cached_state) { if ((end & (PAGE_CACHE_SIZE - 1)) == 0) WARN_ON(1); return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, - GFP_NOFS); + cached_state, GFP_NOFS); } /* see btrfs_writepage_start_hook for details on why this is required */ @@ -1530,6 +1531,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work) { struct btrfs_writepage_fixup *fixup; struct btrfs_ordered_extent *ordered; + struct extent_state *cached_state = NULL; struct page *page; struct inode *inode; u64 page_start; @@ -1548,7 +1550,8 @@ again: page_start = page_offset(page); page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; - lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); + lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0, + &cached_state, GFP_NOFS); /* already ordered? We're done */ if (PagePrivate2(page)) @@ -1556,17 +1559,18 @@ again: ordered = btrfs_lookup_ordered_extent(inode, page_start); if (ordered) { - unlock_extent(&BTRFS_I(inode)->io_tree, page_start, - page_end, GFP_NOFS); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, + page_end, &cached_state, GFP_NOFS); unlock_page(page); btrfs_start_ordered_extent(inode, ordered, 1); goto again; } - btrfs_set_extent_delalloc(inode, page_start, page_end); + btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state); ClearPageChecked(page); out: - unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, + &cached_state, GFP_NOFS); out_page: unlock_page(page); page_cache_release(page); @@ -1695,6 +1699,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct btrfs_trans_handle *trans; struct btrfs_ordered_extent *ordered_extent = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct extent_state *cached_state = NULL; int compressed = 0; int ret; @@ -1716,9 +1721,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) goto out; } - lock_extent(io_tree, ordered_extent->file_offset, - ordered_extent->file_offset + ordered_extent->len - 1, - GFP_NOFS); + lock_extent_bits(io_tree, ordered_extent->file_offset, + ordered_extent->file_offset + ordered_extent->len - 1, + 0, &cached_state, GFP_NOFS); trans = btrfs_join_transaction(root, 1); @@ -1745,9 +1750,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ordered_extent->len); BUG_ON(ret); } - unlock_extent(io_tree, ordered_extent->file_offset, - ordered_extent->file_offset + ordered_extent->len - 1, - GFP_NOFS); + unlock_extent_cached(io_tree, ordered_extent->file_offset, + ordered_extent->file_offset + + ordered_extent->len - 1, &cached_state, GFP_NOFS); + add_pending_csums(trans, inode, ordered_extent->file_offset, &ordered_extent->list); @@ -3084,6 +3090,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from) struct btrfs_root *root = BTRFS_I(inode)->root; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct btrfs_ordered_extent *ordered; + struct extent_state *cached_state = NULL; char *kaddr; u32 blocksize = root->sectorsize; pgoff_t index = from >> PAGE_CACHE_SHIFT; @@ -3130,12 +3137,14 @@ again: } wait_on_page_writeback(page); - lock_extent(io_tree, page_start, page_end, GFP_NOFS); + lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, + GFP_NOFS); set_page_extent_mapped(page); ordered = btrfs_lookup_ordered_extent(inode, page_start); if (ordered) { - 
unlock_extent(io_tree, page_start, page_end, GFP_NOFS); + unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); unlock_page(page); page_cache_release(page); btrfs_start_ordered_extent(inode, ordered, 1); @@ -3143,13 +3152,15 @@ again: goto again; } - clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, + clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, - GFP_NOFS); + 0, 0, &cached_state, GFP_NOFS); - ret = btrfs_set_extent_delalloc(inode, page_start, page_end); + ret = btrfs_set_extent_delalloc(inode, page_start, page_end, + &cached_state); if (ret) { - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); + unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); goto out_unlock; } @@ -3162,7 +3173,8 @@ again: } ClearPageChecked(page); set_page_dirty(page); - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); + unlock_extent_cached(io_tree, page_start, page_end, &cached_state, + GFP_NOFS); out_unlock: if (ret) @@ -3180,6 +3192,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) struct btrfs_root *root = BTRFS_I(inode)->root; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_map *em; + struct extent_state *cached_state = NULL; u64 mask = root->sectorsize - 1; u64 hole_start = (inode->i_size + mask) & ~mask; u64 block_end = (size + mask) & ~mask; @@ -3195,11 +3208,13 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) struct btrfs_ordered_extent *ordered; btrfs_wait_ordered_range(inode, hole_start, block_end - hole_start); - lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); + lock_extent_bits(io_tree, hole_start, block_end - 1, 0, + &cached_state, GFP_NOFS); ordered = btrfs_lookup_ordered_extent(inode, hole_start); if (!ordered) break; - unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); + unlock_extent_cached(io_tree, hole_start, block_end - 1, + &cached_state, GFP_NOFS); btrfs_put_ordered_extent(ordered); } @@ -3244,7 +3259,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) break; } - unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); + unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state, + GFP_NOFS); return err; } @@ -4985,6 +5001,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset) { struct extent_io_tree *tree; struct btrfs_ordered_extent *ordered; + struct extent_state *cached_state = NULL; u64 page_start = page_offset(page); u64 page_end = page_start + PAGE_CACHE_SIZE - 1; @@ -5003,7 +5020,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset) btrfs_releasepage(page, GFP_NOFS); return; } - lock_extent(tree, page_start, page_end, GFP_NOFS); + lock_extent_bits(tree, page_start, page_end, 0, &cached_state, + GFP_NOFS); ordered = btrfs_lookup_ordered_extent(page->mapping->host, page_offset(page)); if (ordered) { @@ -5014,7 +5032,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset) clear_extent_bit(tree, page_start, page_end, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0, - NULL, GFP_NOFS); + &cached_state, GFP_NOFS); /* * whoever cleared the private bit is responsible * for the finish_ordered_io @@ -5024,11 +5042,13 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset) page_start, page_end); } btrfs_put_ordered_extent(ordered); - lock_extent(tree, page_start, page_end, GFP_NOFS); + cached_state = NULL; + lock_extent_bits(tree, 
page_start, page_end, 0, &cached_state, + GFP_NOFS); } clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, 1, 1, NULL, GFP_NOFS); + EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS); __btrfs_releasepage(page, GFP_NOFS); ClearPageChecked(page); @@ -5061,6 +5081,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) struct btrfs_root *root = BTRFS_I(inode)->root; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct btrfs_ordered_extent *ordered; + struct extent_state *cached_state = NULL; char *kaddr; unsigned long zero_start; loff_t size; @@ -5099,7 +5120,8 @@ again: } wait_on_page_writeback(page); - lock_extent(io_tree, page_start, page_end, GFP_NOFS); + lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state, + GFP_NOFS); set_page_extent_mapped(page); /* @@ -5108,7 +5130,8 @@ again: */ ordered = btrfs_lookup_ordered_extent(inode, page_start); if (ordered) { - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); + unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); unlock_page(page); btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); @@ -5122,13 +5145,15 @@ again: * is probably a better way to do this, but for now keep consistent with * prepare_pages in the normal write path. */ - clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, + clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, - GFP_NOFS); + 0, 0, &cached_state, GFP_NOFS); - ret = btrfs_set_extent_delalloc(inode, page_start, page_end); + ret = btrfs_set_extent_delalloc(inode, page_start, page_end, + &cached_state); if (ret) { - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); + unlock_extent_cached(io_tree, page_start, page_end, + &cached_state, GFP_NOFS); ret = VM_FAULT_SIGBUS; btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); goto out_unlock; @@ -5154,7 +5179,7 @@ again: BTRFS_I(inode)->last_trans = root->fs_info->generation; BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); + unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); out_unlock: btrfs_unreserve_metadata_for_delalloc(root, inode, 1); @@ -5833,6 +5858,7 @@ stop_trans: static long btrfs_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len) { + struct extent_state *cached_state = NULL; u64 cur_offset; u64 last_byte; u64 alloc_start; @@ -5871,16 +5897,17 @@ static long btrfs_fallocate(struct inode *inode, int mode, /* the extent lock is ordered inside the running * transaction */ - lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, - GFP_NOFS); + lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, + locked_end, 0, &cached_state, GFP_NOFS); ordered = btrfs_lookup_first_ordered_extent(inode, alloc_end - 1); if (ordered && ordered->file_offset + ordered->len > alloc_start && ordered->file_offset < alloc_end) { btrfs_put_ordered_extent(ordered); - unlock_extent(&BTRFS_I(inode)->io_tree, - alloc_start, locked_end, GFP_NOFS); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, + alloc_start, locked_end, + &cached_state, GFP_NOFS); /* * we can't wait on the range with the transaction * running or with the extent lock held @@ -5922,8 +5949,8 @@ static long btrfs_fallocate(struct inode *inode, int mode, break; } } - unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, - 
GFP_NOFS); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, + &cached_state, GFP_NOFS); btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode, alloc_end - alloc_start); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index d866b460c26e..9aaba6e472d3 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -673,7 +673,7 @@ again: page_end, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, GFP_NOFS); - btrfs_set_extent_delalloc(inode, page_start, page_end); + btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); ClearPageChecked(page); set_page_dirty(page); unlock_extent(io_tree, page_start, page_end, GFP_NOFS); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index d52759daa53f..0b23942cbc0d 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2659,7 +2659,7 @@ static int relocate_file_extent_cluster(struct inode *inode, EXTENT_BOUNDARY, GFP_NOFS); nr++; } - btrfs_set_extent_delalloc(inode, page_start, page_end); + btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); set_page_dirty(page); dirty_page++; -- cgit v1.2.3 From 1406e4327be3a533a2b18582f715ce2cfbcf6804 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 13 Jan 2010 18:19:06 +0000 Subject: Btrfs: add a "df" ioctl for btrfs df is a very loaded question in btrfs. This gives us a way to get the per-space usage information so we can tell exactly what is in use where. This will help us figure out ENOSPC problems, and help users better understand where their disk space is going. Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/ioctl.h | 14 +++++++++++++- 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 9aaba6e472d3..9213d39d36cc 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1843,6 +1843,49 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) return 0; } +long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) +{ + struct btrfs_ioctl_space_args space_args; + struct btrfs_ioctl_space_info space; + struct btrfs_ioctl_space_info *dest; + struct btrfs_space_info *info; + int ret = 0; + + if (copy_from_user(&space_args, + (struct btrfs_ioctl_space_args __user *)arg, + sizeof(space_args))) + return -EFAULT; + + space_args.total_spaces = 0; + dest = (struct btrfs_ioctl_space_info *) + (arg + sizeof(struct btrfs_ioctl_space_args)); + + rcu_read_lock(); + list_for_each_entry_rcu(info, &root->fs_info->space_info, list) { + if (!space_args.space_slots) { + space_args.total_spaces++; + continue; + } + if (space_args.total_spaces >= space_args.space_slots) + break; + space.flags = info->flags; + space.total_bytes = info->total_bytes; + space.used_bytes = info->bytes_used; + if (copy_to_user(dest, &space, sizeof(space))) { + ret = -EFAULT; + break; + } + dest++; + space_args.total_spaces++; + } + rcu_read_unlock(); + + if (copy_to_user(arg, &space_args, sizeof(space_args))) + ret = -EFAULT; + + return ret; +} + /* * there are many ways the trans_start and trans_end ioctls can lead * to deadlocks. 
They should only be used by applications that @@ -1915,6 +1958,8 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_tree_search(file, argp); case BTRFS_IOC_INO_LOOKUP: return btrfs_ioctl_ino_lookup(file, argp); + case BTRFS_IOC_SPACE_INFO: + return btrfs_ioctl_space_info(root, argp); case BTRFS_IOC_SYNC: btrfs_sync_fs(file->f_dentry->d_sb, 1); return 0; diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 2d64a65842f3..81eac0a59ea3 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -1,4 +1,3 @@ - /* * Copyright (C) 2007 Oracle. All rights reserved. * @@ -127,6 +126,17 @@ struct btrfs_ioctl_defrag_range_args { __u32 unused[5]; }; +struct btrfs_ioctl_space_info { + u64 flags; + u64 total_bytes; + u64 used_bytes; +}; + +struct btrfs_ioctl_space_args { + u64 space_slots; + u64 total_spaces; + struct btrfs_ioctl_space_info spaces[0]; +}; #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \ struct btrfs_ioctl_vol_args) @@ -166,4 +176,6 @@ struct btrfs_ioctl_defrag_range_args { #define BTRFS_IOC_INO_LOOKUP _IOWR(BTRFS_IOCTL_MAGIC, 18, \ struct btrfs_ioctl_ino_lookup_args) #define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, u64) +#define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \ + struct btrfs_ioctl_space_args) #endif -- cgit v1.2.3 From 91748467a5c5884e44ad5cf58630c0c28474f1f6 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 28 Feb 2010 10:59:11 +0000 Subject: btrfs: use memparse Use memparse() instead of its own private implementation. Signed-off-by: Akinobu Mita Cc: Chris Mason Cc: linux-btrfs@vger.kernel.org Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 - fs/btrfs/ioctl.c | 2 +- fs/btrfs/super.c | 31 +++---------------------------- 3 files changed, 4 insertions(+), 30 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 3f704a816137..11115847d875 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2388,7 +2388,6 @@ void btrfs_sysfs_del_super(struct btrfs_fs_info *root); ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size); /* super.c */ -u64 btrfs_parse_size(char *str); int btrfs_parse_options(struct btrfs_root *root, char *options); int btrfs_sync_fs(struct super_block *sb, int wait); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 9213d39d36cc..363e209679b6 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -776,7 +776,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, mod = 1; sizestr++; } - new_size = btrfs_parse_size(sizestr); + new_size = memparse(sizestr, NULL); if (new_size == 0) { ret = -EINVAL; goto out_unlock; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index ff3dd55f294d..9ac612e6ca60 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -96,31 +96,6 @@ static match_table_t tokens = { {Opt_err, NULL}, }; -u64 btrfs_parse_size(char *str) -{ - u64 res; - int mult = 1; - char *end; - char last; - - res = simple_strtoul(str, &end, 10); - - last = end[0]; - if (isalpha(last)) { - last = tolower(last); - switch (last) { - case 'g': - mult *= 1024; - case 'm': - mult *= 1024; - case 'k': - mult *= 1024; - } - res = res * mult; - } - return res; -} - /* * Regular mount options parser. Everything that is needed only when * reading in a new superblock is parsed here. 
@@ -216,7 +191,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) case Opt_max_extent: num = match_strdup(&args[0]); if (num) { - info->max_extent = btrfs_parse_size(num); + info->max_extent = memparse(num, NULL); kfree(num); info->max_extent = max_t(u64, @@ -228,7 +203,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) case Opt_max_inline: num = match_strdup(&args[0]); if (num) { - info->max_inline = btrfs_parse_size(num); + info->max_inline = memparse(num, NULL); kfree(num); if (info->max_inline) { @@ -243,7 +218,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) case Opt_alloc_start: num = match_strdup(&args[0]); if (num) { - info->alloc_start = btrfs_parse_size(num); + info->alloc_start = memparse(num, NULL); kfree(num); printk(KERN_INFO "btrfs: allocations start at %llu\n", -- cgit v1.2.3 From a343832f1a55c74791a8a37053fc02ad80640710 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 6 Jan 2010 11:48:18 +0000 Subject: btrfs: using btrfs_stack_device_id() get devid We can use btrfs_stack_device_id() to get dev_item->devid Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index eb89e1317aca..4053fc44d2cf 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -599,7 +599,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, goto error_close; disk_super = (struct btrfs_super_block *)bh->b_data; - devid = le64_to_cpu(disk_super->dev_item.devid); + devid = btrfs_stack_device_id(&disk_super->dev_item); if (devid != device->devid) goto error_brelse; @@ -701,7 +701,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, goto error_close; } disk_super = (struct btrfs_super_block *)bh->b_data; - devid = le64_to_cpu(disk_super->dev_item.devid); + devid = btrfs_stack_device_id(&disk_super->dev_item); transid = btrfs_super_generation(disk_super); if (disk_super->label[0]) printk(KERN_INFO "device label %s ", disk_super->label); @@ -1194,7 +1194,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) goto error_close; } disk_super = (struct btrfs_super_block *)bh->b_data; - devid = le64_to_cpu(disk_super->dev_item.devid); + devid = btrfs_stack_device_id(&disk_super->dev_item); dev_uuid = disk_super->dev_item.uuid; device = btrfs_find_device(root, devid, dev_uuid, disk_super->fsid); -- cgit v1.2.3 From 5ff7ba3a797a8ffd5299b8477df2dca3c3ebd34e Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 15 Mar 2010 10:21:30 -0400 Subject: Btrfs: don't look at bio flags after submit_bio After callling submit_bio, the bio can be freed at any time. The btrfs submission thread helper was checking the bio flags too late, which might not give the correct answer. When CONFIG_DEBUG_PAGE_ALLOC is turned on, it can lead to oopsen. 
Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 4053fc44d2cf..9df8e3f1ccab 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -256,13 +256,13 @@ loop_lock: wake_up(&fs_info->async_submit_wait); BUG_ON(atomic_read(&cur->bi_cnt) == 0); - submit_bio(cur->bi_rw, cur); - num_run++; - batch_run++; if (bio_rw_flagged(cur, BIO_RW_SYNCIO)) num_sync_run++; + submit_bio(cur->bi_rw, cur); + num_run++; + batch_run++; if (need_resched()) { if (num_sync_run) { blk_run_backing_dev(bdi, NULL); -- cgit v1.2.3 From ef5780c018ed169a77b623f87c4ba52faa8ad0fe Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Mon, 15 Mar 2010 11:05:29 -0400 Subject: Btrfs: fix gfp flags masking in the compression code GFP_FS must be masked out, NOFS can't be or'd in. Signed-off-by: Chris Mason --- fs/btrfs/compression.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index a11a32058b50..28b92a7218ab 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -478,7 +478,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, goto next; } - page = alloc_page(mapping_gfp_mask(mapping) | GFP_NOFS); + page = alloc_page(mapping_gfp_mask(mapping) & ~__GFP_FS); if (!page) break; -- cgit v1.2.3 From 8212cf7583a5ba5d213d9c9180be808222a2813f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 15 Mar 2010 11:22:26 +0300 Subject: cifs: trivial white space I fixed the indent level. Signed-off-by: Dan Carpenter Signed-off-by: Steve French --- fs/cifs/cifssmb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 903d53871da7..20959befc705 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -500,7 +500,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) } else if (pSMBr->hdr.WordCount == 13) { cERROR(1, ("mount failed, cifs module not built " "with CIFS_WEAK_PW_HASH support")); - rc = -EOPNOTSUPP; + rc = -EOPNOTSUPP; #endif /* WEAK_PW_HASH */ goto neg_err_exit; } else if (pSMBr->hdr.WordCount != 17) { -- cgit v1.2.3 From 8e4971f2fb2380ce66196136e113d04196b80fcd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20Grafstr=C3=B6m?= Date: Mon, 15 Mar 2010 16:04:14 +0100 Subject: ARM: 5991/1: Fix regression in restore_user_regs macro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ARMv5T and earlier require that a ldm {}^ instruction is not followed by an instruction that accesses banked registers. This patch restores the nop that was lost in commit b86040a59feb255a8193173caa4d5199464433d5. Signed-off-by: Anders Grafström Signed-off-by: Russell King --- arch/arm/kernel/entry-header.S | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 7e9ed1eea40a..d93f976fb389 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -102,6 +102,8 @@ .else ldmdb sp, {r0 - lr}^ @ get calling r0 - lr .endif + mov r0, r0 @ ARMv5T and earlier require a nop + @ after ldm {}^ add sp, sp, #S_FRAME_SIZE - S_PC movs pc, lr @ return & move spsr_svc into cpsr .endm -- cgit v1.2.3 From ccf50e2341c4174d5579315e184ea312081acec6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 15 Mar 2010 19:03:06 +0000 Subject: ARM: Fix sorting of platform group config options and includes ... and document the sorting criteria to help future additions. 
Signed-off-by: Russell King --- arch/arm/Kconfig | 81 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 45 insertions(+), 36 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index cadfe2ee66a5..c5408bf1bf43 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -218,6 +218,10 @@ config MMU Select if you want MMU-based virtualised addressing space support by paged memory management. If unsure, say 'Y'. +# +# The "ARM system type" choice list is ordered alphabetically by option +# text. Please add new entries in the option alphabetic order. +# choice prompt "ARM system type" default ARCH_VERSATILE @@ -274,6 +278,18 @@ config ARCH_AT91 This enables support for systems based on the Atmel AT91RM9200, AT91SAM9 and AT91CAP9 processors. +config ARCH_BCMRING + bool "Broadcom BCMRING" + depends on MMU + select CPU_V6 + select ARM_AMBA + select COMMON_CLKDEV + select GENERIC_TIME + select GENERIC_CLOCKEVENTS + select ARCH_WANT_OPTIONAL_GPIOLIB + help + Support for Broadcom's BCMRing platform. + config ARCH_CLPS711X bool "Cirrus Logic CLPS711x/EP721x-based" select CPU_ARM720T @@ -359,20 +375,6 @@ config ARCH_H720X help This enables support for systems based on the Hynix HMS720x -config ARCH_NOMADIK - bool "STMicroelectronics Nomadik" - select ARM_AMBA - select ARM_VIC - select CPU_ARM926T - select HAVE_CLK - select COMMON_CLKDEV - select GENERIC_TIME - select GENERIC_CLOCKEVENTS - select GENERIC_GPIO - select ARCH_REQUIRE_GPIOLIB - help - Support for the Nomadik platform by ST-Ericsson - config ARCH_IOP13XX bool "IOP13xx-based" depends on MMU @@ -747,6 +749,30 @@ config ARCH_U300 help Support for ST-Ericsson U300 series mobile platforms. +config ARCH_U8500 + bool "ST-Ericsson U8500 Series" + select CPU_V7 + select ARM_AMBA + select GENERIC_TIME + select GENERIC_CLOCKEVENTS + select COMMON_CLKDEV + help + Support for ST-Ericsson's Ux500 architecture + +config ARCH_NOMADIK + bool "STMicroelectronics Nomadik" + select ARM_AMBA + select ARM_VIC + select CPU_ARM926T + select HAVE_CLK + select COMMON_CLKDEV + select GENERIC_TIME + select GENERIC_CLOCKEVENTS + select GENERIC_GPIO + select ARCH_REQUIRE_GPIOLIB + help + Support for the Nomadik platform by ST-Ericsson + config ARCH_DAVINCI bool "TI DaVinci" select CPU_ARM926T @@ -775,30 +801,13 @@ config ARCH_OMAP help Support for TI's OMAP platform (OMAP1 and OMAP2). -config ARCH_BCMRING - bool "Broadcom BCMRING" - depends on MMU - select CPU_V6 - select ARM_AMBA - select COMMON_CLKDEV - select GENERIC_TIME - select GENERIC_CLOCKEVENTS - select ARCH_WANT_OPTIONAL_GPIOLIB - help - Support for Broadcom's BCMRing platform. - -config ARCH_U8500 - bool "ST-Ericsson U8500 Series" - select CPU_V7 - select ARM_AMBA - select GENERIC_TIME - select GENERIC_CLOCKEVENTS - select COMMON_CLKDEV - help - Support for ST-Ericsson's Ux500 architecture - endchoice +# +# This is sorted alphabetically by mach-* pathname. However, plat-* +# Kconfigs may be included either alphabetically (according to the +# plat- suffix) or along side the corresponding mach-* source. +# source "arch/arm/mach-aaec2000/Kconfig" source "arch/arm/mach-at91/Kconfig" -- cgit v1.2.3 From cfbc0683af235106e7dabe92003870b82ad6f0ba Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 11 Mar 2010 11:20:17 +1100 Subject: NFS: ensure bdi_unregister is called on mount failure. bdi_unregister is called by nfs_put_super which is only called by generic_shutdown_super if ->s_root is not NULL. So if we error out in a circumstance where we called nfs_bdi_register (i.e. 
server != NULL) but have not set s_root, then we need to call bdi_unregister explicitly in nfs_get_sb and various other *_get_sb() functions. Signed-off-by: NeilBrown Signed-off-by: Trond Myklebust --- fs/nfs/super.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/fs/nfs/super.c b/fs/nfs/super.c index f1afee4eea77..6baf9a393466 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2214,7 +2214,7 @@ static int nfs_get_sb(struct file_system_type *fs_type, } else { error = nfs_bdi_register(server); if (error) - goto error_splat_super; + goto error_splat_bdi; } if (!s->s_root) { @@ -2256,6 +2256,9 @@ out_err_nosb: error_splat_root: dput(mntroot); error_splat_super: + if (server && !s->s_root) + bdi_unregister(&server->backing_dev_info); +error_splat_bdi: deactivate_locked_super(s); goto out; } @@ -2326,7 +2329,7 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags, } else { error = nfs_bdi_register(server); if (error) - goto error_splat_super; + goto error_splat_bdi; } if (!s->s_root) { @@ -2363,6 +2366,9 @@ out_err_noserver: return error; error_splat_super: + if (server && !s->s_root) + bdi_unregister(&server->backing_dev_info); +error_splat_bdi: deactivate_locked_super(s); dprintk("<-- nfs_xdev_get_sb() = %d [splat]\n", error); return error; @@ -2578,7 +2584,7 @@ static int nfs4_remote_get_sb(struct file_system_type *fs_type, } else { error = nfs_bdi_register(server); if (error) - goto error_splat_super; + goto error_splat_bdi; } if (!s->s_root) { @@ -2616,6 +2622,9 @@ out_free: error_splat_root: dput(mntroot); error_splat_super: + if (server && !s->s_root) + bdi_unregister(&server->backing_dev_info); +error_splat_bdi: deactivate_locked_super(s); goto out; } @@ -2811,7 +2820,7 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags, } else { error = nfs_bdi_register(server); if (error) - goto error_splat_super; + goto error_splat_bdi; } if (!s->s_root) { @@ -2847,6 +2856,9 @@ out_err_noserver: return error; error_splat_super: + if (server && !s->s_root) + bdi_unregister(&server->backing_dev_info); +error_splat_bdi: deactivate_locked_super(s); dprintk("<-- nfs4_xdev_get_sb() = %d [splat]\n", error); return error; @@ -2893,7 +2905,7 @@ static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type, } else { error = nfs_bdi_register(server); if (error) - goto error_splat_super; + goto error_splat_bdi; } if (!s->s_root) { @@ -2929,6 +2941,9 @@ out_err_noserver: return error; error_splat_super: + if (server && !s->s_root) + bdi_unregister(&server->backing_dev_info); +error_splat_bdi: deactivate_locked_super(s); dprintk("<-- nfs4_referral_get_sb() = %d [splat]\n", error); return error; -- cgit v1.2.3 From 0d5e6f7ae8609b944c08e8a2f63f7d169c548134 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 15 Mar 2010 22:04:24 +0000 Subject: ARM: Fix RiscPC decompressor build errors arch/arm/boot/compressed/decompress.o: In function `do_decompress': decompress.c:(.text+0x26e8): undefined reference to `error' decompress.c:(.text+0x2760): undefined reference to `error' decompress.c:(.text+0x27d8): undefined reference to `error' decompress.c:(.text+0x2824): undefined reference to `error' decompress.c:(.text+0x28f0): undefined reference to `error' Signed-off-by: Russell King --- arch/arm/mach-rpc/include/mach/uncompress.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/arm/mach-rpc/include/mach/uncompress.h b/arch/arm/mach-rpc/include/mach/uncompress.h index d5862368c4f2..8c9e2c7161c6 100644 --- 
a/arch/arm/mach-rpc/include/mach/uncompress.h +++ b/arch/arm/mach-rpc/include/mach/uncompress.h @@ -109,8 +109,6 @@ static inline void flush(void) { } -static void error(char *x); - /* * Setup for decompression */ -- cgit v1.2.3 From 664e87e47e086962a832f9a613a8fd4042a02ac1 Mon Sep 17 00:00:00 2001 From: Vasily Khoruzhick Date: Sun, 14 Mar 2010 16:32:28 +0200 Subject: ARM: S3C2440: Fix s3c2440 cpufreq compilation post move. Fix compilation issue by moving s3c2440-cpufreq.c into mach-s3c2440 directory Signed-off-by: Vasily Khoruzhick Signed-off-by: Ben Dooks --- arch/arm/mach-s3c2440/s3c2440-cpufreq.c | 311 ++++++++++++++++++++++++++++++++ arch/arm/plat-s3c24xx/s3c2440-cpufreq.c | 311 -------------------------------- 2 files changed, 311 insertions(+), 311 deletions(-) create mode 100644 arch/arm/mach-s3c2440/s3c2440-cpufreq.c delete mode 100644 arch/arm/plat-s3c24xx/s3c2440-cpufreq.c diff --git a/arch/arm/mach-s3c2440/s3c2440-cpufreq.c b/arch/arm/mach-s3c2440/s3c2440-cpufreq.c new file mode 100644 index 000000000000..976002fb1b8f --- /dev/null +++ b/arch/arm/mach-s3c2440/s3c2440-cpufreq.c @@ -0,0 +1,311 @@ +/* linux/arch/arm/plat-s3c24xx/s3c2440-cpufreq.c + * + * Copyright (c) 2006-2009 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks + * Vincent Sanders + * + * S3C2440/S3C2442 CPU Frequency scaling + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include + +#include +#include +#include + +static struct clk *xtal; +static struct clk *fclk; +static struct clk *hclk; +static struct clk *armclk; + +/* HDIV: 1, 2, 3, 4, 6, 8 */ + +static inline int within_khz(unsigned long a, unsigned long b) +{ + long diff = a - b; + + return (diff >= -1000 && diff <= 1000); +} + +/** + * s3c2440_cpufreq_calcdivs - calculate divider settings + * @cfg: The cpu frequency settings. + * + * Calcualte the divider values for the given frequency settings + * specified in @cfg. The values are stored in @cfg for later use + * by the relevant set routine if the request settings can be reached. + */ +int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg) +{ + unsigned int hdiv, pdiv; + unsigned long hclk, fclk, armclk; + unsigned long hclk_max; + + fclk = cfg->freq.fclk; + armclk = cfg->freq.armclk; + hclk_max = cfg->max.hclk; + + s3c_freq_dbg("%s: fclk is %lu, armclk %lu, max hclk %lu\n", + __func__, fclk, armclk, hclk_max); + + if (armclk > fclk) { + printk(KERN_WARNING "%s: armclk > fclk\n", __func__); + armclk = fclk; + } + + /* if we are in DVS, we need HCLK to be <= ARMCLK */ + if (armclk < fclk && armclk < hclk_max) + hclk_max = armclk; + + for (hdiv = 1; hdiv < 9; hdiv++) { + if (hdiv == 5 || hdiv == 7) + hdiv++; + + hclk = (fclk / hdiv); + if (hclk <= hclk_max || within_khz(hclk, hclk_max)) + break; + } + + s3c_freq_dbg("%s: hclk %lu, div %d\n", __func__, hclk, hdiv); + + if (hdiv > 8) + goto invalid; + + pdiv = (hclk > cfg->max.pclk) ? 
2 : 1; + + if ((hclk / pdiv) > cfg->max.pclk) + pdiv++; + + s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv); + + if (pdiv > 2) + goto invalid; + + pdiv *= hdiv; + + /* calculate a valid armclk */ + + if (armclk < hclk) + armclk = hclk; + + /* if we're running armclk lower than fclk, this really means + * that the system should go into dvs mode, which means that + * armclk is connected to hclk. */ + if (armclk < fclk) { + cfg->divs.dvs = 1; + armclk = hclk; + } else + cfg->divs.dvs = 0; + + cfg->freq.armclk = armclk; + + /* store the result, and then return */ + + cfg->divs.h_divisor = hdiv; + cfg->divs.p_divisor = pdiv; + + return 0; + + invalid: + return -EINVAL; +} + +#define CAMDIVN_HCLK_HALF (S3C2440_CAMDIVN_HCLK3_HALF | \ + S3C2440_CAMDIVN_HCLK4_HALF) + +/** + * s3c2440_cpufreq_setdivs - set the cpu frequency divider settings + * @cfg: The cpu frequency settings. + * + * Set the divisors from the settings in @cfg, which where generated + * during the calculation phase by s3c2440_cpufreq_calcdivs(). + */ +static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg) +{ + unsigned long clkdiv, camdiv; + + s3c_freq_dbg("%s: divsiors: h=%d, p=%d\n", __func__, + cfg->divs.h_divisor, cfg->divs.p_divisor); + + clkdiv = __raw_readl(S3C2410_CLKDIVN); + camdiv = __raw_readl(S3C2440_CAMDIVN); + + clkdiv &= ~(S3C2440_CLKDIVN_HDIVN_MASK | S3C2440_CLKDIVN_PDIVN); + camdiv &= ~CAMDIVN_HCLK_HALF; + + switch (cfg->divs.h_divisor) { + case 1: + clkdiv |= S3C2440_CLKDIVN_HDIVN_1; + break; + + case 2: + clkdiv |= S3C2440_CLKDIVN_HDIVN_2; + break; + + case 6: + camdiv |= S3C2440_CAMDIVN_HCLK3_HALF; + case 3: + clkdiv |= S3C2440_CLKDIVN_HDIVN_3_6; + break; + + case 8: + camdiv |= S3C2440_CAMDIVN_HCLK4_HALF; + case 4: + clkdiv |= S3C2440_CLKDIVN_HDIVN_4_8; + break; + + default: + BUG(); /* we don't expect to get here. */ + } + + if (cfg->divs.p_divisor != cfg->divs.h_divisor) + clkdiv |= S3C2440_CLKDIVN_PDIVN; + + /* todo - set pclk. */ + + /* Write the divisors first with hclk intentionally halved so that + * when we write clkdiv we will under-frequency instead of over. We + * then make a short delay and remove the hclk halving if necessary. + */ + + __raw_writel(camdiv | CAMDIVN_HCLK_HALF, S3C2440_CAMDIVN); + __raw_writel(clkdiv, S3C2410_CLKDIVN); + + ndelay(20); + __raw_writel(camdiv, S3C2440_CAMDIVN); + + clk_set_parent(armclk, cfg->divs.dvs ? 
hclk : fclk); +} + +static int run_freq_for(unsigned long max_hclk, unsigned long fclk, + int *divs, + struct cpufreq_frequency_table *table, + size_t table_size) +{ + unsigned long freq; + int index = 0; + int div; + + for (div = *divs; div > 0; div = *divs++) { + freq = fclk / div; + + if (freq > max_hclk && div != 1) + continue; + + freq /= 1000; /* table is in kHz */ + index = s3c_cpufreq_addfreq(table, index, table_size, freq); + if (index < 0) + break; + } + + return index; +} + +static int hclk_divs[] = { 1, 2, 3, 4, 6, 8, -1 }; + +static int s3c2440_cpufreq_calctable(struct s3c_cpufreq_config *cfg, + struct cpufreq_frequency_table *table, + size_t table_size) +{ + int ret; + + WARN_ON(cfg->info == NULL); + WARN_ON(cfg->board == NULL); + + ret = run_freq_for(cfg->info->max.hclk, + cfg->info->max.fclk, + hclk_divs, + table, table_size); + + s3c_freq_dbg("%s: returning %d\n", __func__, ret); + + return ret; +} + +struct s3c_cpufreq_info s3c2440_cpufreq_info = { + .max = { + .fclk = 400000000, + .hclk = 133333333, + .pclk = 66666666, + }, + + .locktime_m = 300, + .locktime_u = 300, + .locktime_bits = 16, + + .name = "s3c244x", + .calc_iotiming = s3c2410_iotiming_calc, + .set_iotiming = s3c2410_iotiming_set, + .get_iotiming = s3c2410_iotiming_get, + .set_fvco = s3c2410_set_fvco, + + .set_refresh = s3c2410_cpufreq_setrefresh, + .set_divs = s3c2440_cpufreq_setdivs, + .calc_divs = s3c2440_cpufreq_calcdivs, + .calc_freqtable = s3c2440_cpufreq_calctable, + + .resume_clocks = s3c244x_setup_clocks, + + .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs), +}; + +static int s3c2440_cpufreq_add(struct sys_device *sysdev) +{ + xtal = s3c_cpufreq_clk_get(NULL, "xtal"); + hclk = s3c_cpufreq_clk_get(NULL, "hclk"); + fclk = s3c_cpufreq_clk_get(NULL, "fclk"); + armclk = s3c_cpufreq_clk_get(NULL, "armclk"); + + if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) { + printk(KERN_ERR "%s: failed to get clocks\n", __func__); + return -ENOENT; + } + + return s3c_cpufreq_register(&s3c2440_cpufreq_info); +} + +static struct sysdev_driver s3c2440_cpufreq_driver = { + .add = s3c2440_cpufreq_add, +}; + +static int s3c2440_cpufreq_init(void) +{ + return sysdev_driver_register(&s3c2440_sysclass, + &s3c2440_cpufreq_driver); +} + +/* arch_initcall adds the clocks we need, so use subsys_initcall. */ +subsys_initcall(s3c2440_cpufreq_init); + +static struct sysdev_driver s3c2442_cpufreq_driver = { + .add = s3c2440_cpufreq_add, +}; + +static int s3c2442_cpufreq_init(void) +{ + return sysdev_driver_register(&s3c2442_sysclass, + &s3c2442_cpufreq_driver); +} + +subsys_initcall(s3c2442_cpufreq_init); diff --git a/arch/arm/plat-s3c24xx/s3c2440-cpufreq.c b/arch/arm/plat-s3c24xx/s3c2440-cpufreq.c deleted file mode 100644 index 976002fb1b8f..000000000000 --- a/arch/arm/plat-s3c24xx/s3c2440-cpufreq.c +++ /dev/null @@ -1,311 +0,0 @@ -/* linux/arch/arm/plat-s3c24xx/s3c2440-cpufreq.c - * - * Copyright (c) 2006-2009 Simtec Electronics - * http://armlinux.simtec.co.uk/ - * Ben Dooks - * Vincent Sanders - * - * S3C2440/S3C2442 CPU Frequency scaling - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
-*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include - -#include - -#include -#include -#include - -static struct clk *xtal; -static struct clk *fclk; -static struct clk *hclk; -static struct clk *armclk; - -/* HDIV: 1, 2, 3, 4, 6, 8 */ - -static inline int within_khz(unsigned long a, unsigned long b) -{ - long diff = a - b; - - return (diff >= -1000 && diff <= 1000); -} - -/** - * s3c2440_cpufreq_calcdivs - calculate divider settings - * @cfg: The cpu frequency settings. - * - * Calcualte the divider values for the given frequency settings - * specified in @cfg. The values are stored in @cfg for later use - * by the relevant set routine if the request settings can be reached. - */ -int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg) -{ - unsigned int hdiv, pdiv; - unsigned long hclk, fclk, armclk; - unsigned long hclk_max; - - fclk = cfg->freq.fclk; - armclk = cfg->freq.armclk; - hclk_max = cfg->max.hclk; - - s3c_freq_dbg("%s: fclk is %lu, armclk %lu, max hclk %lu\n", - __func__, fclk, armclk, hclk_max); - - if (armclk > fclk) { - printk(KERN_WARNING "%s: armclk > fclk\n", __func__); - armclk = fclk; - } - - /* if we are in DVS, we need HCLK to be <= ARMCLK */ - if (armclk < fclk && armclk < hclk_max) - hclk_max = armclk; - - for (hdiv = 1; hdiv < 9; hdiv++) { - if (hdiv == 5 || hdiv == 7) - hdiv++; - - hclk = (fclk / hdiv); - if (hclk <= hclk_max || within_khz(hclk, hclk_max)) - break; - } - - s3c_freq_dbg("%s: hclk %lu, div %d\n", __func__, hclk, hdiv); - - if (hdiv > 8) - goto invalid; - - pdiv = (hclk > cfg->max.pclk) ? 2 : 1; - - if ((hclk / pdiv) > cfg->max.pclk) - pdiv++; - - s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv); - - if (pdiv > 2) - goto invalid; - - pdiv *= hdiv; - - /* calculate a valid armclk */ - - if (armclk < hclk) - armclk = hclk; - - /* if we're running armclk lower than fclk, this really means - * that the system should go into dvs mode, which means that - * armclk is connected to hclk. */ - if (armclk < fclk) { - cfg->divs.dvs = 1; - armclk = hclk; - } else - cfg->divs.dvs = 0; - - cfg->freq.armclk = armclk; - - /* store the result, and then return */ - - cfg->divs.h_divisor = hdiv; - cfg->divs.p_divisor = pdiv; - - return 0; - - invalid: - return -EINVAL; -} - -#define CAMDIVN_HCLK_HALF (S3C2440_CAMDIVN_HCLK3_HALF | \ - S3C2440_CAMDIVN_HCLK4_HALF) - -/** - * s3c2440_cpufreq_setdivs - set the cpu frequency divider settings - * @cfg: The cpu frequency settings. - * - * Set the divisors from the settings in @cfg, which where generated - * during the calculation phase by s3c2440_cpufreq_calcdivs(). - */ -static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg) -{ - unsigned long clkdiv, camdiv; - - s3c_freq_dbg("%s: divsiors: h=%d, p=%d\n", __func__, - cfg->divs.h_divisor, cfg->divs.p_divisor); - - clkdiv = __raw_readl(S3C2410_CLKDIVN); - camdiv = __raw_readl(S3C2440_CAMDIVN); - - clkdiv &= ~(S3C2440_CLKDIVN_HDIVN_MASK | S3C2440_CLKDIVN_PDIVN); - camdiv &= ~CAMDIVN_HCLK_HALF; - - switch (cfg->divs.h_divisor) { - case 1: - clkdiv |= S3C2440_CLKDIVN_HDIVN_1; - break; - - case 2: - clkdiv |= S3C2440_CLKDIVN_HDIVN_2; - break; - - case 6: - camdiv |= S3C2440_CAMDIVN_HCLK3_HALF; - case 3: - clkdiv |= S3C2440_CLKDIVN_HDIVN_3_6; - break; - - case 8: - camdiv |= S3C2440_CAMDIVN_HCLK4_HALF; - case 4: - clkdiv |= S3C2440_CLKDIVN_HDIVN_4_8; - break; - - default: - BUG(); /* we don't expect to get here. 
*/ - } - - if (cfg->divs.p_divisor != cfg->divs.h_divisor) - clkdiv |= S3C2440_CLKDIVN_PDIVN; - - /* todo - set pclk. */ - - /* Write the divisors first with hclk intentionally halved so that - * when we write clkdiv we will under-frequency instead of over. We - * then make a short delay and remove the hclk halving if necessary. - */ - - __raw_writel(camdiv | CAMDIVN_HCLK_HALF, S3C2440_CAMDIVN); - __raw_writel(clkdiv, S3C2410_CLKDIVN); - - ndelay(20); - __raw_writel(camdiv, S3C2440_CAMDIVN); - - clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk); -} - -static int run_freq_for(unsigned long max_hclk, unsigned long fclk, - int *divs, - struct cpufreq_frequency_table *table, - size_t table_size) -{ - unsigned long freq; - int index = 0; - int div; - - for (div = *divs; div > 0; div = *divs++) { - freq = fclk / div; - - if (freq > max_hclk && div != 1) - continue; - - freq /= 1000; /* table is in kHz */ - index = s3c_cpufreq_addfreq(table, index, table_size, freq); - if (index < 0) - break; - } - - return index; -} - -static int hclk_divs[] = { 1, 2, 3, 4, 6, 8, -1 }; - -static int s3c2440_cpufreq_calctable(struct s3c_cpufreq_config *cfg, - struct cpufreq_frequency_table *table, - size_t table_size) -{ - int ret; - - WARN_ON(cfg->info == NULL); - WARN_ON(cfg->board == NULL); - - ret = run_freq_for(cfg->info->max.hclk, - cfg->info->max.fclk, - hclk_divs, - table, table_size); - - s3c_freq_dbg("%s: returning %d\n", __func__, ret); - - return ret; -} - -struct s3c_cpufreq_info s3c2440_cpufreq_info = { - .max = { - .fclk = 400000000, - .hclk = 133333333, - .pclk = 66666666, - }, - - .locktime_m = 300, - .locktime_u = 300, - .locktime_bits = 16, - - .name = "s3c244x", - .calc_iotiming = s3c2410_iotiming_calc, - .set_iotiming = s3c2410_iotiming_set, - .get_iotiming = s3c2410_iotiming_get, - .set_fvco = s3c2410_set_fvco, - - .set_refresh = s3c2410_cpufreq_setrefresh, - .set_divs = s3c2440_cpufreq_setdivs, - .calc_divs = s3c2440_cpufreq_calcdivs, - .calc_freqtable = s3c2440_cpufreq_calctable, - - .resume_clocks = s3c244x_setup_clocks, - - .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs), -}; - -static int s3c2440_cpufreq_add(struct sys_device *sysdev) -{ - xtal = s3c_cpufreq_clk_get(NULL, "xtal"); - hclk = s3c_cpufreq_clk_get(NULL, "hclk"); - fclk = s3c_cpufreq_clk_get(NULL, "fclk"); - armclk = s3c_cpufreq_clk_get(NULL, "armclk"); - - if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) { - printk(KERN_ERR "%s: failed to get clocks\n", __func__); - return -ENOENT; - } - - return s3c_cpufreq_register(&s3c2440_cpufreq_info); -} - -static struct sysdev_driver s3c2440_cpufreq_driver = { - .add = s3c2440_cpufreq_add, -}; - -static int s3c2440_cpufreq_init(void) -{ - return sysdev_driver_register(&s3c2440_sysclass, - &s3c2440_cpufreq_driver); -} - -/* arch_initcall adds the clocks we need, so use subsys_initcall. 
*/ -subsys_initcall(s3c2440_cpufreq_init); - -static struct sysdev_driver s3c2442_cpufreq_driver = { - .add = s3c2440_cpufreq_add, -}; - -static int s3c2442_cpufreq_init(void) -{ - return sysdev_driver_register(&s3c2442_sysclass, - &s3c2442_cpufreq_driver); -} - -subsys_initcall(s3c2442_cpufreq_init); -- cgit v1.2.3 From e79032aa75af76a14e2cdd973b199855e7761881 Mon Sep 17 00:00:00 2001 From: Vasily Khoruzhick Date: Mon, 15 Mar 2010 16:27:08 +0200 Subject: ARM: SAMSUNG: Add suspend/resume support for S3C PWM driver Reset period_ns and duty_ns values in suspend handler to avoid skip of configuration if same values passed to pwm_config; Restore invertion bit in resume handler. Without this patch PWM works incorrectly after resume from suspend. Signed-off-by: Vasily Khoruzhick Signed-off-by: Ben Dooks --- arch/arm/plat-samsung/pwm.c | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/arch/arm/plat-samsung/pwm.c b/arch/arm/plat-samsung/pwm.c index ef019f27b67d..f2d11390d01c 100644 --- a/arch/arm/plat-samsung/pwm.c +++ b/arch/arm/plat-samsung/pwm.c @@ -379,6 +379,39 @@ static int __devexit s3c_pwm_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int s3c_pwm_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct pwm_device *pwm = platform_get_drvdata(pdev); + + /* No one preserve these values during suspend so reset them + * Otherwise driver leaves PWM unconfigured if same values + * passed to pwm_config + */ + pwm->period_ns = 0; + pwm->duty_ns = 0; + + return 0; +} + +static int s3c_pwm_resume(struct platform_device *pdev) +{ + struct pwm_device *pwm = platform_get_drvdata(pdev); + unsigned long tcon; + + /* Restore invertion */ + tcon = __raw_readl(S3C2410_TCON); + tcon |= pwm_tcon_invert(pwm); + __raw_writel(tcon, S3C2410_TCON); + + return 0; +} + +#else +#define s3c_pwm_suspend NULL +#define s3c_pwm_resume NULL +#endif + static struct platform_driver s3c_pwm_driver = { .driver = { .name = "s3c24xx-pwm", @@ -386,6 +419,8 @@ static struct platform_driver s3c_pwm_driver = { }, .probe = s3c_pwm_probe, .remove = __devexit_p(s3c_pwm_remove), + .suspend = s3c_pwm_suspend, + .resume = s3c_pwm_resume, }; static int __init pwm_init(void) -- cgit v1.2.3 From 65e543f12cb7f98cb041a759a21685569eec8695 Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Mon, 15 Mar 2010 22:57:18 +0000 Subject: ARM: SAMSUNG: Fix build error from stale define in The decleration of error() as static in the platform specific uncompress code in arch/arm/plat-samsung/include/plat/uncomopress.h causes the build of the uncompressor to break. Remove it, as it is no longer needed. 
arch/arm/boot/compressed/decompress.o: In function `gunzip': /var/tmp/kernel-orig/arch/arm/boot/compressed/../../../../lib/decompress_inflate.c:67: undefined reference to `error' /var/tmp/kernel-orig/arch/arm/boot/compressed/../../../../lib/decompress_inflate.c:73: undefined reference to `error' /var/tmp/kernel-orig/arch/arm/boot/compressed/../../../../lib/decompress_inflate.c:80: undefined reference to `error' /var/tmp/kernel-orig/arch/arm/boot/compressed/../../../../lib/decompress_inflate.c:95: undefined reference to `error' /var/tmp/kernel-orig/arch/arm/boot/compressed/../../../../lib/decompress_inflate.c:152: undefined reference to `error' Signed-off-by: Ben Dooks --- arch/arm/plat-samsung/include/plat/uncompress.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/arm/plat-samsung/include/plat/uncompress.h b/arch/arm/plat-samsung/include/plat/uncompress.h index e87ce8ffbbcd..7d6ed7263d57 100644 --- a/arch/arm/plat-samsung/include/plat/uncompress.h +++ b/arch/arm/plat-samsung/include/plat/uncompress.h @@ -140,8 +140,6 @@ static void arch_decomp_error(const char *x) #define arch_error arch_decomp_error #endif -static void error(char *err); - #ifdef CONFIG_S3C_BOOT_UART_FORCE_FIFO static inline void arch_enable_uart_fifo(void) { -- cgit v1.2.3 From 0e17226f7cd289504724466f4298abc9bdfca3fe Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Mon, 15 Mar 2010 23:08:03 +0000 Subject: ARM: SAMSUNG: Fixup commit 4e6d488af37980d224cbf298224db6173673f362 Commit 4e6d488af37980d224cbf298224db6173673f362 either missed out the following machine files or somehow managed to clash between merges. Fixup the three files missing the second parameter to addruart macro to allow them to build. Fixes the following warnings in arch/arm/kernel/debug.c: arch/arm/kernel/debug.S: Assembler messages: arch/arm/kernel/debug.S:167: Error: too many positional arguments arch/arm/kernel/debug.S:183: Error: too many positional arguments Signed-off-by: Ben Dooks --- arch/arm/mach-s3c64xx/include/mach/debug-macro.S | 2 +- arch/arm/mach-s5p6440/include/mach/debug-macro.S | 2 +- arch/arm/mach-s5p6442/include/mach/debug-macro.S | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm/mach-s3c64xx/include/mach/debug-macro.S b/arch/arm/mach-s3c64xx/include/mach/debug-macro.S index b18ac5266dfc..f9ab5d26052a 100644 --- a/arch/arm/mach-s3c64xx/include/mach/debug-macro.S +++ b/arch/arm/mach-s3c64xx/include/mach/debug-macro.S @@ -21,7 +21,7 @@ * aligned and add in the offset when we load the value here. */ - .macro addruart, rx + .macro addruart, rx, rtmp mrc p15, 0, \rx, c1, c0 tst \rx, #1 ldreq \rx, = S3C_PA_UART diff --git a/arch/arm/mach-s5p6440/include/mach/debug-macro.S b/arch/arm/mach-s5p6440/include/mach/debug-macro.S index 48cdb0da026c..1347d7f99079 100644 --- a/arch/arm/mach-s5p6440/include/mach/debug-macro.S +++ b/arch/arm/mach-s5p6440/include/mach/debug-macro.S @@ -19,7 +19,7 @@ * aligned and add in the offset when we load the value here. 
*/ - .macro addruart, rx + .macro addruart, rx, rtmp mrc p15, 0, \rx, c1, c0 tst \rx, #1 ldreq \rx, = S3C_PA_UART diff --git a/arch/arm/mach-s5p6442/include/mach/debug-macro.S b/arch/arm/mach-s5p6442/include/mach/debug-macro.S index 1aae691e58ef..bb6536147ffb 100644 --- a/arch/arm/mach-s5p6442/include/mach/debug-macro.S +++ b/arch/arm/mach-s5p6442/include/mach/debug-macro.S @@ -15,7 +15,7 @@ #include #include - .macro addruart, rx + .macro addruart, rx, rtmp mrc p15, 0, \rx, c1, c0 tst \rx, #1 ldreq \rx, = S3C_PA_UART -- cgit v1.2.3 From 627a2d3c29427637f4c5d31ccc7fcbd8d312cd71 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 8 Mar 2010 16:44:38 +1100 Subject: md: deal with merge_bvec_fn in component devices better. If a component device has a merge_bvec_fn then as we never call it we must ensure we never need to. Currently this is done by setting max_sector to 1 PAGE, however this does not stop a bio being created with several sub-page iovecs that would violate the merge_bvec_fn. So instead set max_segments to 1 and set the segment boundary to the same as a page boundary to ensure there is only ever one single-page segment of IO requested at a time. This can particularly be an issue when 'xen' is used as it is known to submit multiple small buffers in a single bio. Signed-off-by: NeilBrown Cc: stable@kernel.org --- drivers/md/linear.c | 12 +++++++----- drivers/md/multipath.c | 20 ++++++++++++-------- drivers/md/raid0.c | 13 +++++++------ drivers/md/raid1.c | 28 +++++++++++++++++----------- drivers/md/raid10.c | 28 +++++++++++++++++----------- 5 files changed, 60 insertions(+), 41 deletions(-) diff --git a/drivers/md/linear.c b/drivers/md/linear.c index af2d39d603c7..bb2a23159b21 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -172,12 +172,14 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); /* as we don't honour merge_bvec_fn, we must never risk - * violating it, so limit ->max_sector to one PAGE, as - * a one page request is never in violation. + * violating it, so limit max_segments to 1 lying within + * a single page. */ - if (rdev->bdev->bd_disk->queue->merge_bvec_fn && - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) - blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); + if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { + blk_queue_max_segments(mddev->queue, 1); + blk_queue_segment_boundary(mddev->queue, + PAGE_CACHE_SIZE - 1); + } conf->array_sectors += rdev->sectors; cnt++; diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 4b323f45ad74..5558ebc705c8 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -301,14 +301,16 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) rdev->data_offset << 9); /* as we don't honour merge_bvec_fn, we must never risk - * violating it, so limit ->max_sector to one PAGE, as - * a one page request is never in violation. + * violating it, so limit ->max_segments to one, lying + * within a single page. * (Note: it is very unlikely that a device with * merge_bvec_fn will be involved in multipath.) 
*/ - if (q->merge_bvec_fn && - queue_max_sectors(q) > (PAGE_SIZE>>9)) - blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); + if (q->merge_bvec_fn) { + blk_queue_max_segments(mddev->queue, 1); + blk_queue_segment_boundary(mddev->queue, + PAGE_CACHE_SIZE - 1); + } conf->working_disks++; mddev->degraded--; @@ -476,9 +478,11 @@ static int multipath_run (mddev_t *mddev) /* as we don't honour merge_bvec_fn, we must never risk * violating it, not that we ever expect a device with * a merge_bvec_fn to be involved in multipath */ - if (rdev->bdev->bd_disk->queue->merge_bvec_fn && - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) - blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); + if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { + blk_queue_max_segments(mddev->queue, 1); + blk_queue_segment_boundary(mddev->queue, + PAGE_CACHE_SIZE - 1); + } if (!test_bit(Faulty, &rdev->flags)) conf->working_disks++; diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index a1f7147b757f..377cf2a3c333 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -176,14 +176,15 @@ static int create_strip_zones(mddev_t *mddev) disk_stack_limits(mddev->gendisk, rdev1->bdev, rdev1->data_offset << 9); /* as we don't honour merge_bvec_fn, we must never risk - * violating it, so limit ->max_sector to one PAGE, as - * a one page request is never in violation. + * violating it, so limit ->max_segments to 1, lying within + * a single page. */ - if (rdev1->bdev->bd_disk->queue->merge_bvec_fn && - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) - blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); - + if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) { + blk_queue_max_segments(mddev->queue, 1); + blk_queue_segment_boundary(mddev->queue, + PAGE_CACHE_SIZE - 1); + } if (!smallest || (rdev1->sectors < smallest->sectors)) smallest = rdev1; cnt++; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 5a06122abd3b..f741f77eeb2b 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1152,13 +1152,17 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); - /* as we don't honour merge_bvec_fn, we must never risk - * violating it, so limit ->max_sector to one PAGE, as - * a one page request is never in violation. + /* as we don't honour merge_bvec_fn, we must + * never risk violating it, so limit + * ->max_segments to one lying with a single + * page, as a one page request is never in + * violation. */ - if (rdev->bdev->bd_disk->queue->merge_bvec_fn && - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) - blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); + if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { + blk_queue_max_segments(mddev->queue, 1); + blk_queue_segment_boundary(mddev->queue, + PAGE_CACHE_SIZE - 1); + } p->head_position = 0; rdev->raid_disk = mirror; @@ -2098,12 +2102,14 @@ static int run(mddev_t *mddev) disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); /* as we don't honour merge_bvec_fn, we must never risk - * violating it, so limit ->max_sector to one PAGE, as - * a one page request is never in violation. + * violating it, so limit ->max_segments to 1 lying within + * a single page, as a one page request is never in violation. 
*/ - if (rdev->bdev->bd_disk->queue->merge_bvec_fn && - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) - blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); + if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { + blk_queue_max_segments(mddev->queue, 1); + blk_queue_segment_boundary(mddev->queue, + PAGE_CACHE_SIZE - 1); + } } mddev->degraded = 0; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 7584f9ab9bcf..b4ba41ecbd20 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1155,13 +1155,17 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); - /* as we don't honour merge_bvec_fn, we must never risk - * violating it, so limit ->max_sector to one PAGE, as - * a one page request is never in violation. + /* as we don't honour merge_bvec_fn, we must + * never risk violating it, so limit + * ->max_segments to one lying with a single + * page, as a one page request is never in + * violation. */ - if (rdev->bdev->bd_disk->queue->merge_bvec_fn && - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) - blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); + if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { + blk_queue_max_segments(mddev->queue, 1); + blk_queue_segment_boundary(mddev->queue, + PAGE_CACHE_SIZE - 1); + } p->head_position = 0; rdev->raid_disk = mirror; @@ -2255,12 +2259,14 @@ static int run(mddev_t *mddev) disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); /* as we don't honour merge_bvec_fn, we must never risk - * violating it, so limit ->max_sector to one PAGE, as - * a one page request is never in violation. + * violating it, so limit max_segments to 1 lying + * within a single page. */ - if (rdev->bdev->bd_disk->queue->merge_bvec_fn && - queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) - blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); + if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { + blk_queue_max_segments(mddev->queue, 1); + blk_queue_segment_boundary(mddev->queue, + PAGE_CACHE_SIZE - 1); + } disk->head_position = 0; } -- cgit v1.2.3 From 572c0e3c73341755f3e7dfaaef6b26df12bd709c Mon Sep 17 00:00:00 2001 From: Daniel T Chen Date: Sun, 14 Mar 2010 23:44:03 -0400 Subject: ALSA: hda: Use LPIB and 6stack-dig for eMachines T5212 BugLink: https://bugs.launchpad.net/bugs/538895 The OR has verified that both position_fix=1 and model=6stack-dig are necessary to have capture function properly. (The existing 3stack-6ch model quirk seems to be incorrect.) 
Reported-by: Reuben Bailey Tested-by: Reuben Bailey Cc: Signed-off-by: Daniel T Chen Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_intel.c | 1 + sound/pci/hda/patch_realtek.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 027d3f4c1c59..1766ad2926d6 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2271,6 +2271,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = { SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), + SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB), {} }; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 07637c4aa46f..4ec57633af88 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -9237,7 +9237,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = { SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC889_INTEL), SND_PCI_QUIRK(0x8086, 0x0021, "Intel IbexPeak", ALC889A_INTEL), SND_PCI_QUIRK(0x8086, 0x3b56, "Intel IbexPeak", ALC889A_INTEL), - SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch), + SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC882_6ST_DIG), {} }; -- cgit v1.2.3 From fb40b496ad8bbe60a60c25eb2fce20f3cc114679 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 16 Mar 2010 09:46:23 +0300 Subject: sound: sequencer: clean up remove bogus check A few lines earlier bend is limited to 2399. So semitones is always less than 24 here. Signed-off-by: Dan Carpenter Signed-off-by: Takashi Iwai --- sound/oss/sequencer.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c index c79874696bec..e85789e53816 100644 --- a/sound/oss/sequencer.c +++ b/sound/oss/sequencer.c @@ -1631,8 +1631,6 @@ unsigned long compute_finetune(unsigned long base_freq, int bend, int range, } semitones = bend / 100; - if (semitones > 99) - semitones = 99; cents = bend % 100; amount = (int) (semitone_tuning[semitones] * multiplier * cent_tuning[cents]) / 10000; -- cgit v1.2.3 From 935050daad4c0ce687f7111995ed7791796deff9 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 16 Mar 2010 00:33:37 -0700 Subject: drivers/serial/sunsab.c: adjust the constant used to initialize the interrupt_mask0 fields From: Julia Lawall SAB82532_ISR0_TCD is declared in drivers/serial/subsab.h as relating to a status register, while SAB82532_IMR0_TCD is declared in the same file as relating to a mask register. The latter seems more appropriate for the interrupt_mask0 field, and follows the strategy for initializing this field elsewhere in the same file. Both SAB82532_ISR0_TCD and SAB82532_IMR0_TCD have the same value. Signed-off-by: Julia Lawall Signed-off-by: David S. Miller --- drivers/serial/sunsab.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c index d514e28d0755..d2e0321049e2 100644 --- a/drivers/serial/sunsab.c +++ b/drivers/serial/sunsab.c @@ -474,7 +474,7 @@ static void sunsab_stop_rx(struct uart_port *port) { struct uart_sunsab_port *up = (struct uart_sunsab_port *) port; - up->interrupt_mask0 |= SAB82532_ISR0_TCD; + up->interrupt_mask0 |= SAB82532_IMR0_TCD; writeb(up->interrupt_mask1, &up->regs->w.imr0); } -- cgit v1.2.3 From 6404fccafa3bfdc0602130a87f4d93baf928bea4 Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Tue, 16 Mar 2010 01:00:17 -0700 Subject: MAINTAINERS: Add entry for sparc serial drivers. Signed-off-by: David S. Miller --- MAINTAINERS | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 34f52a14e051..a1cb1e6f6bee 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5136,6 +5136,21 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6.git S: Maintained F: arch/sparc/ +SPARC SERIAL DRIVERS +M: "David S. Miller" +L: sparclinux@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6.git +S: Maintained +F: drivers/serial/suncore.c +F: drivers/serial/suncore.h +F: drivers/serial/sunhv.c +F: drivers/serial/sunsab.c +F: drivers/serial/sunsab.h +F: drivers/serial/sunsu.c +F: drivers/serial/sunzilog.c +F: drivers/serial/sunzilog.h + SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER M: Roger Wolff S: Supported -- cgit v1.2.3 From e639ba481b76e445df354acd6e29d859a9b1657f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Pr=C3=A9mont?= Date: Mon, 15 Mar 2010 19:00:27 +0100 Subject: HID: avoid '\0' in hid debugfs events file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When dumping /sys/kernel/debug/hid/$device/events '\0' characters show up (invisible if cat to console but shown by less or while looking at a dump file). These are due to hid_debug_event() adding strlen()+1 bytes to the ring buffer (e.g. including the trailing '\0'). Any roll-over causes a '\0' as well as hid_debug_event() handles the ring buffers with HID_DEBUG_BUFSIZE-1 size while hid_debug_events_read() handles it with full HID_DEBUG_BUFSIZE size. Signed-off-by: Bruno Prémont Signed-off-by: Jiri Kosina --- drivers/hid/hid-debug.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index cd4ece6fdfb9..0c4e75573186 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -564,10 +564,10 @@ void hid_debug_event(struct hid_device *hdev, char *buf) struct hid_debug_list *list; list_for_each_entry(list, &hdev->debug_list, node) { - for (i = 0; i <= strlen(buf); i++) - list->hid_debug_buf[(list->tail + i) % (HID_DEBUG_BUFSIZE - 1)] = + for (i = 0; i < strlen(buf); i++) + list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] = buf[i]; - list->tail = (list->tail + i) % (HID_DEBUG_BUFSIZE - 1); + list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE; } } EXPORT_SYMBOL_GPL(hid_debug_event); -- cgit v1.2.3 From 4e06e240dcbb803433ee31bfe89a3e785a77cd3b Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 16 Mar 2010 15:57:44 +0100 Subject: PCMCIA: resource, fix lock imbalance Stanse found that one error path (when alloc_skb fails) in netdev_tx omits to unlock hw_priv->hwlock. Fix that by moving away from unlock in each fail path. Unlock at one place instead. 
Introduced in 94a819f80297e1f635a7cde4ed5317612e512ba7 (pcmcia: assert locking to struct pcmcia_device) Signed-off-by: Jiri Slaby Signed-off-by: Dominik Brodowski --- drivers/pcmcia/pcmcia_resource.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index ef782c09f029..c4612c52e4cb 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c @@ -256,6 +256,7 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev, { struct pcmcia_socket *s; config_t *c; + int ret; s = p_dev->socket; @@ -264,13 +265,13 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev, if (!(s->state & SOCKET_PRESENT)) { dev_dbg(&s->dev, "No card present\n"); - mutex_unlock(&s->ops_mutex); - return -ENODEV; + ret = -ENODEV; + goto unlock; } if (!(c->state & CONFIG_LOCKED)) { dev_dbg(&s->dev, "Configuration isnt't locked\n"); - mutex_unlock(&s->ops_mutex); - return -EACCES; + ret = -EACCES; + goto unlock; } if (mod->Attributes & CONF_IRQ_CHANGE_VALID) { @@ -286,7 +287,8 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev, if (mod->Attributes & CONF_VCC_CHANGE_VALID) { dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n"); - return -EINVAL; + ret = -EINVAL; + goto unlock; } /* We only allow changing Vpp1 and Vpp2 to the same value */ @@ -294,21 +296,21 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev, (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { if (mod->Vpp1 != mod->Vpp2) { dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n"); - mutex_unlock(&s->ops_mutex); - return -EINVAL; + ret = -EINVAL; + goto unlock; } s->socket.Vpp = mod->Vpp1; if (s->ops->set_socket(s, &s->socket)) { - mutex_unlock(&s->ops_mutex); dev_printk(KERN_WARNING, &s->dev, "Unable to set VPP\n"); - return -EIO; + ret = -EIO; + goto unlock; } } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) || (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n"); - mutex_unlock(&s->ops_mutex); - return -EINVAL; + ret = -EINVAL; + goto unlock; } if (mod->Attributes & CONF_IO_CHANGE_WIDTH) { @@ -332,9 +334,11 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev, s->ops->set_io_map(s, &io_on); } } + ret = 0; +unlock: mutex_unlock(&s->ops_mutex); - return 0; + return ret; } /* modify_configuration */ EXPORT_SYMBOL(pcmcia_modify_configuration); -- cgit v1.2.3 From 854d2c3531e6d32e76b94ca5e096ea54c7497e40 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 16 Mar 2010 00:02:25 +0000 Subject: Btrfs: fix search_ioctl key advance key->type is u8, not u64. 
fs/btrfs/ioctl.c: In function 'copy_to_sk': fs/btrfs/ioctl.c:1024: warning: comparison is always true due to limited range of data type Signed-off-by: Sage Weil Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 363e209679b6..38a68863390a 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1000,7 +1000,7 @@ static noinline int copy_to_sk(struct btrfs_root *root, advance_key: if (key->offset < (u64)-1) key->offset++; - else if (key->type < (u64)-1) + else if (key->type < (u8)-1) key->type++; else if (key->objectid < (u64)-1) key->objectid++; -- cgit v1.2.3 From ce769a2904bf5a9110ef534a7702397e38e2b3e9 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 16 Mar 2010 00:02:26 +0000 Subject: Btrfs: use __u64 types in ioctl.h Signed-off-by: Sage Weil Signed-off-by: Chris Mason --- fs/btrfs/ioctl.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 81eac0a59ea3..424694aa517f 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -127,14 +127,14 @@ struct btrfs_ioctl_defrag_range_args { }; struct btrfs_ioctl_space_info { - u64 flags; - u64 total_bytes; - u64 used_bytes; + __u64 flags; + __u64 total_bytes; + __u64 used_bytes; }; struct btrfs_ioctl_space_args { - u64 space_slots; - u64 total_spaces; + __u64 space_slots; + __u64 total_spaces; struct btrfs_ioctl_space_info spaces[0]; }; -- cgit v1.2.3 From 7fde62bffb576d384ea49a3aed3403d5609ee5bc Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 16 Mar 2010 15:40:10 -0400 Subject: Btrfs: buffer results in the space_info ioctl The space_info ioctl was using copy_to_user inside rcu_read_lock. This commit changes things to copy into a buffer first and then dump the result down to userland. Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 57 +++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 11 deletions(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 38a68863390a..4329610b141b 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1848,39 +1848,74 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) struct btrfs_ioctl_space_args space_args; struct btrfs_ioctl_space_info space; struct btrfs_ioctl_space_info *dest; + struct btrfs_ioctl_space_info *dest_orig; + struct btrfs_ioctl_space_info *user_dest; struct btrfs_space_info *info; + int alloc_size; int ret = 0; + int slot_count = 0; if (copy_from_user(&space_args, (struct btrfs_ioctl_space_args __user *)arg, sizeof(space_args))) return -EFAULT; + /* first we count slots */ + rcu_read_lock(); + list_for_each_entry_rcu(info, &root->fs_info->space_info, list) + slot_count++; + rcu_read_unlock(); + + /* space_slots == 0 means they are asking for a count */ + if (space_args.space_slots == 0) { + space_args.total_spaces = slot_count; + goto out; + } + alloc_size = sizeof(*dest) * slot_count; + /* we generally have at most 6 or so space infos, one for each raid + * level. 
So, a whole page should be more than enough for everyone + */ + if (alloc_size > PAGE_CACHE_SIZE) + return -ENOMEM; + space_args.total_spaces = 0; - dest = (struct btrfs_ioctl_space_info *) - (arg + sizeof(struct btrfs_ioctl_space_args)); + dest = kmalloc(alloc_size, GFP_NOFS); + if (!dest) + return -ENOMEM; + dest_orig = dest; + /* now we have a buffer to copy into */ rcu_read_lock(); list_for_each_entry_rcu(info, &root->fs_info->space_info, list) { - if (!space_args.space_slots) { - space_args.total_spaces++; - continue; - } + /* make sure we don't copy more than we allocated + * in our buffer + */ + if (slot_count == 0) + break; + slot_count--; + + /* make sure userland has enough room in their buffer */ if (space_args.total_spaces >= space_args.space_slots) break; + space.flags = info->flags; space.total_bytes = info->total_bytes; space.used_bytes = info->bytes_used; - if (copy_to_user(dest, &space, sizeof(space))) { - ret = -EFAULT; - break; - } + memcpy(dest, &space, sizeof(space)); dest++; space_args.total_spaces++; } rcu_read_unlock(); - if (copy_to_user(arg, &space_args, sizeof(space_args))) + user_dest = (struct btrfs_ioctl_space_info *) + (arg + sizeof(struct btrfs_ioctl_space_args)); + + if (copy_to_user(user_dest, dest_orig, alloc_size)) + ret = -EFAULT; + + kfree(dest_orig); +out: + if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args))) ret = -EFAULT; return ret; -- cgit v1.2.3 From e7fb9c4ad351a8da7c09e182bd2e7ccd043daf08 Mon Sep 17 00:00:00 2001 From: Alberto Panizzo Date: Fri, 18 Dec 2009 16:42:11 +0100 Subject: backlight: Add Epson L4F00242T03 LCD driver The Epson LCD L4F00242T03 is mounted on the Freescale i.MX31 PDK board. Based upon Marek Vasut work in l4f00242t03.c, this driver provides basic init and power on/off functionality for this device through the sysfs lcd interface. Unfortunately Datasheet for this device are not available and all the control sequences sent to the display were copied from the freescale driver that in the i.MX31 Linux BSP. As in the i.MX31PDK board the core and io suppliers are voltage regulators, that functionality is embedded here, but not strict. Signed-off-by: Alberto Panizzo Signed-off-by: Richard Purdie --- drivers/video/backlight/Kconfig | 7 + drivers/video/backlight/Makefile | 1 + drivers/video/backlight/l4f00242t03.c | 256 ++++++++++++++++++++++++++++++++++ include/linux/spi/l4f00242t03.h | 31 ++++ 4 files changed, 295 insertions(+) create mode 100644 drivers/video/backlight/l4f00242t03.c create mode 100644 include/linux/spi/l4f00242t03.h diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index 0c77fc610212..c025c84601b0 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -31,6 +31,13 @@ config LCD_CORGI Say y here to support the LCD panels usually found on SHARP corgi (C7x0) and spitz (Cxx00) models. +config LCD_L4F00242T03 + tristate "Epson L4F00242T03 LCD" + depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO + help + SPI driver for Epson L4F00242T03. This provides basic support + for init and powering the LCD up/down through a sysfs interface. 
+ config LCD_LMS283GF05 tristate "Samsung LMS283GF05 LCD" depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile index 6c704d41462d..09d1f14d6257 100644 --- a/drivers/video/backlight/Makefile +++ b/drivers/video/backlight/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o obj-$(CONFIG_LCD_CORGI) += corgi_lcd.o obj-$(CONFIG_LCD_HP700) += jornada720_lcd.o +obj-$(CONFIG_LCD_L4F00242T03) += l4f00242t03.o obj-$(CONFIG_LCD_LMS283GF05) += lms283gf05.o obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o obj-$(CONFIG_LCD_ILI9320) += ili9320.o diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c new file mode 100644 index 000000000000..42d061e1403b --- /dev/null +++ b/drivers/video/backlight/l4f00242t03.c @@ -0,0 +1,256 @@ +/* + * l4f00242t03.c -- support for Epson L4F00242T03 LCD + * + * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved. + * + * Copyright (c) 2009 Alberto Panizzo + * Inspired by Marek Vasut work in l4f00242t03.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +struct l4f00242t03_priv { + struct spi_device *spi; + struct lcd_device *ld; + int lcd_on:1; + struct regulator *io_reg; + struct regulator *core_reg; +}; + + +static void l4f00242t03_reset(unsigned int gpio) +{ + pr_debug("l4f00242t03_reset.\n"); + gpio_set_value(gpio, 1); + mdelay(100); + gpio_set_value(gpio, 0); + mdelay(10); /* tRES >= 100us */ + gpio_set_value(gpio, 1); + mdelay(20); +} + +#define param(x) ((x) | 0x100) + +static void l4f00242t03_lcd_init(struct spi_device *spi) +{ + struct l4f00242t03_pdata *pdata = spi->dev.platform_data; + struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev); + const u16 cmd[] = { 0x36, param(0), 0x3A, param(0x60) }; + + dev_dbg(&spi->dev, "initializing LCD\n"); + + if (priv->io_reg) { + regulator_set_voltage(priv->io_reg, 1800000, 1800000); + regulator_enable(priv->io_reg); + } + + if (priv->core_reg) { + regulator_set_voltage(priv->core_reg, 2800000, 2800000); + regulator_enable(priv->core_reg); + } + + gpio_set_value(pdata->data_enable_gpio, 1); + msleep(60); + spi_write(spi, (const u8 *)cmd, ARRAY_SIZE(cmd) * sizeof(u16)); +} + +static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power) +{ + struct l4f00242t03_priv *priv = lcd_get_data(ld); + struct spi_device *spi = priv->spi; + + const u16 slpout = 0x11; + const u16 dison = 0x29; + + const u16 slpin = 0x10; + const u16 disoff = 0x28; + + if (power) { + if (priv->lcd_on) + return 0; + + dev_dbg(&spi->dev, "turning on LCD\n"); + + spi_write(spi, (const u8 *)&slpout, sizeof(u16)); + msleep(60); + spi_write(spi, (const u8 *)&dison, sizeof(u16)); + + priv->lcd_on = 1; + } else { + if (!priv->lcd_on) + return 0; + + dev_dbg(&spi->dev, "turning off LCD\n"); + + spi_write(spi, (const u8 *)&disoff, sizeof(u16)); + msleep(60); + spi_write(spi, (const u8 *)&slpin, sizeof(u16)); + + priv->lcd_on = 0; + } + + return 0; +} + +static struct lcd_ops l4f_ops = { + .set_power = l4f00242t03_lcd_power_set, + .get_power = NULL, +}; + +static int __devinit l4f00242t03_probe(struct spi_device *spi) +{ + struct l4f00242t03_priv *priv; + struct l4f00242t03_pdata *pdata = spi->dev.platform_data; + int ret; + + if (pdata == NULL) { + dev_err(&spi->dev, 
"Uninitialized platform data.\n"); + return -EINVAL; + } + + priv = kzalloc(sizeof(struct l4f00242t03_priv), GFP_KERNEL); + + if (priv == NULL) { + dev_err(&spi->dev, "No memory for this device.\n"); + ret = -ENOMEM; + goto err; + } + + dev_set_drvdata(&spi->dev, priv); + spi->bits_per_word = 9; + spi_setup(spi); + + priv->spi = spi; + + ret = gpio_request(pdata->reset_gpio, "lcd l4f00242t03 reset"); + if (ret) { + dev_err(&spi->dev, + "Unable to get the lcd l4f00242t03 reset gpio.\n"); + return ret; + } + + ret = gpio_direction_output(pdata->reset_gpio, 1); + if (ret) + goto err2; + + ret = gpio_request(pdata->data_enable_gpio, + "lcd l4f00242t03 data enable"); + if (ret) { + dev_err(&spi->dev, + "Unable to get the lcd l4f00242t03 data en gpio.\n"); + return ret; + } + + ret = gpio_direction_output(pdata->data_enable_gpio, 0); + if (ret) + goto err3; + + if (pdata->io_supply) { + priv->io_reg = regulator_get(NULL, pdata->io_supply); + + if (IS_ERR(priv->io_reg)) { + pr_err("%s: Unable to get the IO regulator\n", + __func__); + goto err3; + } + } + + if (pdata->core_supply) { + priv->core_reg = regulator_get(NULL, pdata->core_supply); + + if (IS_ERR(priv->core_reg)) { + pr_err("%s: Unable to get the core regulator\n", + __func__); + goto err4; + } + } + + priv->ld = lcd_device_register("l4f00242t03", + &spi->dev, priv, &l4f_ops); + if (IS_ERR(priv->ld)) { + ret = PTR_ERR(priv->ld); + goto err5; + } + + /* Init the LCD */ + l4f00242t03_reset(pdata->reset_gpio); + l4f00242t03_lcd_init(spi); + l4f00242t03_lcd_power_set(priv->ld, 1); + + dev_info(&spi->dev, "Epson l4f00242t03 lcd probed.\n"); + + return 0; + +err5: + if (priv->core_reg) + regulator_put(priv->core_reg); +err4: + if (priv->io_reg) + regulator_put(priv->io_reg); +err3: + gpio_free(pdata->data_enable_gpio); +err2: + gpio_free(pdata->reset_gpio); +err: + kfree(priv); + + return ret; +} + +static int __devexit l4f00242t03_remove(struct spi_device *spi) +{ + struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev); + struct l4f00242t03_pdata *pdata = priv->spi->dev.platform_data; + + l4f00242t03_lcd_power_set(priv->ld, 0); + lcd_device_unregister(priv->ld); + + gpio_free(pdata->data_enable_gpio); + gpio_free(pdata->reset_gpio); + + if (priv->io_reg) + regulator_put(priv->core_reg); + if (priv->core_reg) + regulator_put(priv->io_reg); + + kfree(priv); + + return 0; +} + +static struct spi_driver l4f00242t03_driver = { + .driver = { + .name = "l4f00242t03", + .owner = THIS_MODULE, + }, + .probe = l4f00242t03_probe, + .remove = __devexit_p(l4f00242t03_remove), +}; + +static __init int l4f00242t03_init(void) +{ + return spi_register_driver(&l4f00242t03_driver); +} + +static __exit void l4f00242t03_exit(void) +{ + spi_unregister_driver(&l4f00242t03_driver); +} + +module_init(l4f00242t03_init); +module_exit(l4f00242t03_exit); + +MODULE_AUTHOR("Alberto Panizzo "); +MODULE_DESCRIPTION("EPSON L4F00242T03 LCD"); diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h new file mode 100644 index 000000000000..aee1dbda4edc --- /dev/null +++ b/include/linux/spi/l4f00242t03.h @@ -0,0 +1,31 @@ +/* + * l4f00242t03.h -- Platform glue for Epson L4F00242T03 LCD + * + * Copyright (c) 2009 Alberto Panizzo + * Based on Marek Vasut work in lms283gf05.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_ +#define _INCLUDE_LINUX_SPI_L4F00242T03_H_ + +struct l4f00242t03_pdata { + unsigned int reset_gpio; + unsigned int data_enable_gpio; + const char *io_supply; /* will be set to 1.8 V */ + const char *core_supply; /* will be set to 2.8 V */ +}; + +#endif /* _INCLUDE_LINUX_SPI_L4F00242T03_H_ */ -- cgit v1.2.3 From b4144e4f6e3b448d322095ca08af393682a69e33 Mon Sep 17 00:00:00 2001 From: Richard Purdie Date: Mon, 18 Jan 2010 14:16:07 +0000 Subject: backlight: Revert some const qualifiers 9905a43b2d563e6f89e4c63c4278ada03f2ebb14 went a little to far with const qualifiers as there are legitiment cases where the function pointers can change (machine specific setup code for example). Signed-off-by: Richard Purdie --- include/linux/backlight.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 8c4f884db6b4..ee377d791996 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -36,18 +36,18 @@ struct backlight_device; struct fb_info; struct backlight_ops { - const unsigned int options; + unsigned int options; #define BL_CORE_SUSPENDRESUME (1 << 0) /* Notify the backlight driver some property has changed */ - int (* const update_status)(struct backlight_device *); + int (*update_status)(struct backlight_device *); /* Return the current backlight brightness (accounting for power, fb_blank etc.) */ - int (* const get_brightness)(struct backlight_device *); + int (*get_brightness)(struct backlight_device *); /* Check if given framebuffer device is the one bound to this backlight; return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ - int (* const check_fb)(struct fb_info *); + int (*check_fb)(struct fb_info *); }; /* This structure defines all the properties of a backlight */ -- cgit v1.2.3 From c3cf2e44d3bbc694eccef33b0f2fe8e2d89baae7 Mon Sep 17 00:00:00 2001 From: Alberto Panizzo Date: Tue, 19 Jan 2010 09:30:50 +0100 Subject: backlight: l4f00242t03: Fix module licence absence. Signed-off-by: Alberto Panizzo Signed-off-by: Richard Purdie --- drivers/video/backlight/l4f00242t03.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c index 42d061e1403b..74abd6994b09 100644 --- a/drivers/video/backlight/l4f00242t03.c +++ b/drivers/video/backlight/l4f00242t03.c @@ -254,3 +254,4 @@ module_exit(l4f00242t03_exit); MODULE_AUTHOR("Alberto Panizzo "); MODULE_DESCRIPTION("EPSON L4F00242T03 LCD"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From a4ebb780e194e8751dc22deeabcddd3fdc8f18f0 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 2 Feb 2010 14:44:50 -0800 Subject: video: backlight/progear, fix pci device refcounting Stanse found an ommitted pci_dev_puts on error path in progearbl_probe. pmu_dev and sb_dev are gotten, but never put when backlight_device_register fails. So unify fail paths and put the devs when the failure occurs. 
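The underlying rule, shown here as a stand-alone sketch reusing the same device IDs (illustrative only, not code from progear_bl.c): pci_get_device() returns a referenced struct pci_dev, so every exit path taken after a successful lookup must drop that reference with pci_dev_put().

    #include <linux/errno.h>
    #include <linux/pci.h>

    /* Sketch of the get/put pairing the fix restores. */
    static int example_find_ali_devices(void)
    {
            struct pci_dev *pmu, *sb;
            int ret = 0;

            pmu = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL);
            if (!pmu)
                    return -ENODEV;

            sb = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
            if (!sb) {
                    ret = -ENODEV;
                    goto put_pmu;           /* drop the reference taken above */
            }

            /* ... program the devices ... */

            pci_dev_put(sb);
    put_pmu:
            pci_dev_put(pmu);
            return ret;
    }
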
Signed-off-by: Jiri Slaby Signed-off-by: Andrew Morton Signed-off-by: Richard Purdie --- drivers/video/backlight/progear_bl.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c index 075786e05034..2ec16deb2397 100644 --- a/drivers/video/backlight/progear_bl.c +++ b/drivers/video/backlight/progear_bl.c @@ -63,6 +63,7 @@ static int progearbl_probe(struct platform_device *pdev) { u8 temp; struct backlight_device *progear_backlight_device; + int ret; pmu_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL); if (!pmu_dev) { @@ -73,8 +74,8 @@ static int progearbl_probe(struct platform_device *pdev) sb_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); if (!sb_dev) { printk("ALI 1533 SB not found.\n"); - pci_dev_put(pmu_dev); - return -ENODEV; + ret = -ENODEV; + goto put_pmu; } /* Set SB_MPS1 to enable brightness control. */ @@ -84,8 +85,10 @@ static int progearbl_probe(struct platform_device *pdev) progear_backlight_device = backlight_device_register("progear-bl", &pdev->dev, NULL, &progearbl_ops); - if (IS_ERR(progear_backlight_device)) - return PTR_ERR(progear_backlight_device); + if (IS_ERR(progear_backlight_device)) { + ret = PTR_ERR(progear_backlight_device); + goto put_sb; + } platform_set_drvdata(pdev, progear_backlight_device); @@ -95,6 +98,11 @@ static int progearbl_probe(struct platform_device *pdev) progearbl_set_intensity(progear_backlight_device); return 0; +put_sb: + pci_dev_put(sb_dev); +put_pmu: + pci_dev_put(pmu_dev); + return ret; } static int progearbl_remove(struct platform_device *pdev) -- cgit v1.2.3 From 57e148b6a975980944f4466ccb669b1d02dfc6a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Pr=C3=A9mont?= Date: Sun, 21 Feb 2010 00:20:01 +0100 Subject: backlight: Add backlight_device parameter to check_fb MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit check_fb from backlight_ops lacks a reference to the backlight_device that's being referred to. Add this parameter so a backlight_device can be mapped to a single framebuffer, especially if the same driver handles multiple devices on a single system. 
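With the extra parameter, a driver that registers one backlight per framebuffer can match them roughly as follows (an illustrative sketch; struct example_bl and its fb field are placeholders, not taken from any driver in the tree):

    #include <linux/backlight.h>
    #include <linux/fb.h>

    /* Placeholder private data: the driver remembers which fb_info it drives. */
    struct example_bl {
            struct fb_info *fb;
    };

    /* Return non-zero only for the framebuffer bound to this backlight. */
    static int example_bl_check_fb(struct backlight_device *bd, struct fb_info *info)
    {
            struct example_bl *priv = bl_get_data(bd);

            return priv->fb == info;
    }
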
Signed-off-by: Bruno Prémont Signed-off-by: Richard Purdie --- drivers/video/backlight/adx_bl.c | 2 +- drivers/video/backlight/backlight.c | 2 +- include/linux/backlight.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c index d769b0bab21a..a683dd1be4bc 100644 --- a/drivers/video/backlight/adx_bl.c +++ b/drivers/video/backlight/adx_bl.c @@ -56,7 +56,7 @@ static int adx_backlight_get_brightness(struct backlight_device *bldev) return brightness & 0xff; } -static int adx_backlight_check_fb(struct fb_info *fb) +static int adx_backlight_check_fb(struct backlight_device *bldev, struct fb_info *fb) { return 1; } diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 18829cf68b1b..b800cd4eeec8 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -38,7 +38,7 @@ static int fb_notifier_callback(struct notifier_block *self, mutex_lock(&bd->ops_lock); if (bd->ops) if (!bd->ops->check_fb || - bd->ops->check_fb(evdata->info)) { + bd->ops->check_fb(bd, evdata->info)) { bd->props.fb_blank = *(int *)evdata->data; if (bd->props.fb_blank == FB_BLANK_UNBLANK) bd->props.state &= ~BL_CORE_FBBLANK; diff --git a/include/linux/backlight.h b/include/linux/backlight.h index ee377d791996..21cd866d24cd 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -47,7 +47,7 @@ struct backlight_ops { int (*get_brightness)(struct backlight_device *); /* Check if given framebuffer device is the one bound to this backlight; return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ - int (*check_fb)(struct fb_info *); + int (*check_fb)(struct backlight_device *, struct fb_info *); }; /* This structure defines all the properties of a backlight */ -- cgit v1.2.3 From a19a6ee6cad2b20292a774c2f56ba8039b0fac9c Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Wed, 17 Feb 2010 16:39:44 -0500 Subject: backlight: Allow properties to be passed at registration Values such as max_brightness should be set before backlights are registered, but the current API doesn't allow that. Add a parameter to backlight_device_register and update drivers to ensure that they set this correctly. 
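After the change, registration looks roughly like the following minimal sketch (the device name, parent, driver data, ops table and the max_brightness value are placeholders, not taken from any driver converted below):

    #include <linux/backlight.h>
    #include <linux/string.h>

    /* Properties are filled in before the device is registered, so
     * max_brightness is valid from the moment the sysfs files appear. */
    static struct backlight_device *example_register(struct device *parent,
                                                     void *drvdata,
                                                     const struct backlight_ops *ops)
    {
            struct backlight_properties props;

            memset(&props, 0, sizeof(props));
            props.max_brightness = 100;     /* placeholder maximum */

            return backlight_device_register("example-bl", parent, drvdata,
                                             ops, &props);
    }
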
Signed-off-by: Matthew Garrett Signed-off-by: Richard Purdie --- drivers/acpi/video.c | 9 ++++++--- drivers/gpu/drm/nouveau/nouveau_backlight.c | 12 ++++++++---- drivers/macintosh/via-pmu-backlight.c | 7 +++++-- drivers/platform/x86/acer-wmi.c | 7 +++++-- drivers/platform/x86/asus-laptop.c | 7 +++++-- drivers/platform/x86/asus_acpi.c | 7 +++++-- drivers/platform/x86/classmate-laptop.c | 8 +++++--- drivers/platform/x86/compal-laptop.c | 11 +++++++---- drivers/platform/x86/dell-laptop.c | 13 ++++++++----- drivers/platform/x86/eeepc-laptop.c | 8 +++++--- drivers/platform/x86/fujitsu-laptop.c | 14 +++++++++----- drivers/platform/x86/msi-laptop.c | 7 +++++-- drivers/platform/x86/msi-wmi.c | 9 ++++++--- drivers/platform/x86/panasonic-laptop.c | 24 ++++++++++++------------ drivers/platform/x86/sony-laptop.c | 8 +++++--- drivers/platform/x86/thinkpad_acpi.c | 12 +++++++----- drivers/platform/x86/toshiba_acpi.c | 10 ++++++---- drivers/staging/samsung-laptop/samsung-laptop.c | 7 +++++-- drivers/usb/misc/appledisplay.c | 7 ++++--- drivers/video/atmel_lcdfb.c | 8 +++++--- drivers/video/aty/aty128fb.c | 7 +++++-- drivers/video/aty/atyfb_base.c | 7 +++++-- drivers/video/aty/radeon_backlight.c | 7 +++++-- drivers/video/backlight/88pm860x_bl.c | 6 ++++-- drivers/video/backlight/adp5520_bl.c | 11 ++++++----- drivers/video/backlight/adx_bl.c | 8 +++++--- drivers/video/backlight/atmel-pwm-bl.c | 8 +++++--- drivers/video/backlight/backlight.c | 8 +++++++- drivers/video/backlight/corgi_lcd.c | 8 +++++--- drivers/video/backlight/cr_bllcd.c | 8 ++++---- drivers/video/backlight/da903x_bl.c | 7 ++++--- drivers/video/backlight/generic_bl.c | 8 +++++--- drivers/video/backlight/hp680_bl.c | 8 +++++--- drivers/video/backlight/jornada720_bl.c | 7 +++++-- drivers/video/backlight/kb3886_bl.c | 8 ++++++-- drivers/video/backlight/locomolcd.c | 8 ++++++-- drivers/video/backlight/max8925_bl.c | 6 ++++-- drivers/video/backlight/mbp_nvidia_bl.c | 10 +++++++--- drivers/video/backlight/omap1_bl.c | 7 +++++-- drivers/video/backlight/progear_bl.c | 7 +++++-- drivers/video/backlight/pwm_bl.c | 8 +++++--- drivers/video/backlight/tosa_bl.c | 8 +++++--- drivers/video/backlight/wm831x_bl.c | 7 ++++--- drivers/video/bf54x-lq043fb.c | 9 +++++---- drivers/video/bfin-t350mcqb-fb.c | 9 +++++---- drivers/video/nvidia/nv_backlight.c | 7 +++++-- drivers/video/omap2/displays/panel-taal.c | 15 +++++++++------ drivers/video/riva/fbdev.c | 7 +++++-- include/linux/backlight.h | 3 ++- 49 files changed, 271 insertions(+), 151 deletions(-) diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 2ff2b6ab5b6c..cbe6f3924a10 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -998,6 +998,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) } if (acpi_video_backlight_support()) { + struct backlight_properties props; int result; static int count = 0; char *name; @@ -1010,12 +1011,14 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) return; sprintf(name, "acpi_video%d", count++); - device->backlight = backlight_device_register(name, - NULL, device, &acpi_backlight_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = device->brightness->count - 3; + device->backlight = backlight_device_register(name, NULL, device, + &acpi_backlight_ops, + &props); kfree(name); if (IS_ERR(device->backlight)) return; - device->backlight->props.max_brightness = device->brightness->count-3; result = sysfs_create_link(&device->backlight->dev.kobj, &device->dev->dev.kobj, 
"device"); diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 20564f8cb0ec..406228f4a2a0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -89,19 +89,21 @@ static struct backlight_ops nv50_bl_ops = { static int nouveau_nv40_backlight_init(struct drm_device *dev) { + struct backlight_properties props; struct drm_nouveau_private *dev_priv = dev->dev_private; struct backlight_device *bd; if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) return 0; + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 31; bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev, - &nv40_bl_ops); + &nv40_bl_ops, &props); if (IS_ERR(bd)) return PTR_ERR(bd); dev_priv->backlight = bd; - bd->props.max_brightness = 31; bd->props.brightness = nv40_get_intensity(bd); backlight_update_status(bd); @@ -110,19 +112,21 @@ static int nouveau_nv40_backlight_init(struct drm_device *dev) static int nouveau_nv50_backlight_init(struct drm_device *dev) { + struct backlight_properties props; struct drm_nouveau_private *dev_priv = dev->dev_private; struct backlight_device *bd; if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT)) return 0; + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 1025; bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev, - &nv50_bl_ops); + &nv50_bl_ops, &props); if (IS_ERR(bd)) return PTR_ERR(bd); dev_priv->backlight = bd; - bd->props.max_brightness = 1025; bd->props.brightness = nv50_get_intensity(bd); backlight_update_status(bd); return 0; diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c index 4f3c4479c16a..1cec02f6c431 100644 --- a/drivers/macintosh/via-pmu-backlight.c +++ b/drivers/macintosh/via-pmu-backlight.c @@ -144,6 +144,7 @@ void pmu_backlight_set_sleep(int sleep) void __init pmu_backlight_init() { + struct backlight_properties props; struct backlight_device *bd; char name[10]; int level, autosave; @@ -161,13 +162,15 @@ void __init pmu_backlight_init() snprintf(name, sizeof(name), "pmubl"); - bd = backlight_device_register(name, NULL, NULL, &pmu_backlight_data); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = FB_BACKLIGHT_LEVELS - 1; + bd = backlight_device_register(name, NULL, NULL, &pmu_backlight_data, + &props); if (IS_ERR(bd)) { printk(KERN_ERR "PMU Backlight registration failed\n"); return; } uses_pmu_bl = 1; - bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1; pmu_backlight_init_curve(0x7F, 0x46, 0x0E); level = bd->props.max_brightness; diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 226b3e93498c..cbca40aa4006 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -922,9 +922,13 @@ static struct backlight_ops acer_bl_ops = { static int __devinit acer_backlight_init(struct device *dev) { + struct backlight_properties props; struct backlight_device *bd; - bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = max_brightness; + bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops, + &props); if (IS_ERR(bd)) { printk(ACER_ERR "Could not register Acer backlight device\n"); acer_backlight_device = NULL; @@ -935,7 +939,6 @@ static int __devinit acer_backlight_init(struct device *dev) bd->props.power = FB_BLANK_UNBLANK; 
bd->props.brightness = read_brightness(bd); - bd->props.max_brightness = max_brightness; backlight_update_status(bd); return 0; } diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 791fcf321506..db5f7db2ba33 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -639,12 +639,16 @@ static int asus_backlight_init(struct asus_laptop *asus) { struct backlight_device *bd; struct device *dev = &asus->platform_device->dev; + struct backlight_properties props; if (!acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) && !acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) && lcd_switch_handle) { + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 15; + bd = backlight_device_register(ASUS_LAPTOP_FILE, dev, - asus, &asusbl_ops); + asus, &asusbl_ops, &props); if (IS_ERR(bd)) { pr_err("Could not register asus backlight device\n"); asus->backlight_device = NULL; @@ -653,7 +657,6 @@ static int asus_backlight_init(struct asus_laptop *asus) asus->backlight_device = bd; - bd->props.max_brightness = 15; bd->props.power = FB_BLANK_UNBLANK; bd->props.brightness = asus_read_brightness(bd); backlight_update_status(bd); diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c index 1381430e1105..ee520357abaa 100644 --- a/drivers/platform/x86/asus_acpi.c +++ b/drivers/platform/x86/asus_acpi.c @@ -1481,6 +1481,7 @@ static void asus_acpi_exit(void) static int __init asus_acpi_init(void) { + struct backlight_properties props; int result; result = acpi_bus_register_driver(&asus_hotk_driver); @@ -1507,15 +1508,17 @@ static int __init asus_acpi_init(void) return -ENODEV; } + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 15; asus_backlight_device = backlight_device_register("asus", NULL, NULL, - &asus_backlight_data); + &asus_backlight_data, + &props); if (IS_ERR(asus_backlight_device)) { printk(KERN_ERR "Could not register asus backlight device\n"); asus_backlight_device = NULL; asus_acpi_exit(); return -ENODEV; } - asus_backlight_device->props.max_brightness = 15; return 0; } diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c index 035a7dd65a3f..6670ed8f9e5b 100644 --- a/drivers/platform/x86/classmate-laptop.c +++ b/drivers/platform/x86/classmate-laptop.c @@ -462,11 +462,13 @@ static struct backlight_ops cmpc_bl_ops = { static int cmpc_bl_add(struct acpi_device *acpi) { + struct backlight_properties props; struct backlight_device *bd; - bd = backlight_device_register("cmpc_bl", &acpi->dev, - acpi->handle, &cmpc_bl_ops); - bd->props.max_brightness = 7; + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 7; + bd = backlight_device_register("cmpc_bl", &acpi->dev, acpi->handle, + &cmpc_bl_ops, &props); dev_set_drvdata(&acpi->dev, bd); return 0; } diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index 2740b40aad9b..71ff1545a93e 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c @@ -291,12 +291,15 @@ static int __init compal_init(void) /* Register backlight stuff */ if (!acpi_video_backlight_support()) { - compalbl_device = backlight_device_register("compal-laptop", NULL, NULL, - &compalbl_ops); + struct backlight_properties props; + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = COMPAL_LCD_LEVEL_MAX - 1; + compalbl_device = 
backlight_device_register("compal-laptop", + NULL, NULL, + &compalbl_ops, + &props); if (IS_ERR(compalbl_device)) return PTR_ERR(compalbl_device); - - compalbl_device->props.max_brightness = COMPAL_LCD_LEVEL_MAX-1; } ret = platform_driver_register(&compal_driver); diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index ef614979afe9..46435ac4684f 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -559,10 +559,14 @@ static int __init dell_init(void) release_buffer(); if (max_intensity) { - dell_backlight_device = backlight_device_register( - "dell_backlight", - &platform_device->dev, NULL, - &dell_ops); + struct backlight_properties props; + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = max_intensity; + dell_backlight_device = backlight_device_register("dell_backlight", + &platform_device->dev, + NULL, + &dell_ops, + &props); if (IS_ERR(dell_backlight_device)) { ret = PTR_ERR(dell_backlight_device); @@ -570,7 +574,6 @@ static int __init dell_init(void) goto fail_backlight; } - dell_backlight_device->props.max_brightness = max_intensity; dell_backlight_device->props.brightness = dell_get_intensity(dell_backlight_device); backlight_update_status(dell_backlight_device); diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 9a844caa3756..3fdf21e0052e 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -1131,18 +1131,20 @@ static int eeepc_backlight_notify(struct eeepc_laptop *eeepc) static int eeepc_backlight_init(struct eeepc_laptop *eeepc) { + struct backlight_properties props; struct backlight_device *bd; + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 15; bd = backlight_device_register(EEEPC_LAPTOP_FILE, - &eeepc->platform_device->dev, - eeepc, &eeepcbl_ops); + &eeepc->platform_device->dev, eeepc, + &eeepcbl_ops, &props); if (IS_ERR(bd)) { pr_err("Could not register eeepc backlight device\n"); eeepc->backlight_device = NULL; return PTR_ERR(bd); } eeepc->backlight_device = bd; - bd->props.max_brightness = 15; bd->props.brightness = read_brightness(bd); bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 5f3320d468f6..c1074b32490e 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -1126,16 +1126,20 @@ static int __init fujitsu_init(void) /* Register backlight stuff */ if (!acpi_video_backlight_support()) { - fujitsu->bl_device = - backlight_device_register("fujitsu-laptop", NULL, NULL, - &fujitsubl_ops); + struct backlight_properties props; + + memset(&props, 0, sizeof(struct backlight_properties)); + max_brightness = fujitsu->max_brightness; + props.max_brightness = max_brightness - 1; + fujitsu->bl_device = backlight_device_register("fujitsu-laptop", + NULL, NULL, + &fujitsubl_ops, + &props); if (IS_ERR(fujitsu->bl_device)) { ret = PTR_ERR(fujitsu->bl_device); fujitsu->bl_device = NULL; goto fail_sysfs_group; } - max_brightness = fujitsu->max_brightness; - fujitsu->bl_device->props.max_brightness = max_brightness - 1; fujitsu->bl_device->props.brightness = fujitsu->brightness_level; } diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index c2b05da4289a..996223a7c009 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c @@ -683,11 +683,14 @@ static 
int __init msi_init(void) printk(KERN_INFO "MSI: Brightness ignored, must be controlled " "by ACPI video driver\n"); } else { + struct backlight_properties props; + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = MSI_LCD_LEVEL_MAX - 1; msibl_device = backlight_device_register("msi-laptop-bl", NULL, - NULL, &msibl_ops); + NULL, &msibl_ops, + &props); if (IS_ERR(msibl_device)) return PTR_ERR(msibl_device); - msibl_device->props.max_brightness = MSI_LCD_LEVEL_MAX-1; } ret = platform_driver_register(&msipf_driver); diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c index f5f70d4c6913..fb7ccaae6563 100644 --- a/drivers/platform/x86/msi-wmi.c +++ b/drivers/platform/x86/msi-wmi.c @@ -249,12 +249,15 @@ static int __init msi_wmi_init(void) goto err_uninstall_notifier; if (!acpi_video_backlight_support()) { - backlight = backlight_device_register(DRV_NAME, - NULL, NULL, &msi_backlight_ops); + struct backlight_properties props; + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = ARRAY_SIZE(backlight_map) - 1; + backlight = backlight_device_register(DRV_NAME, NULL, NULL, + &msi_backlight_ops, + &props); if (IS_ERR(backlight)) goto err_free_input; - backlight->props.max_brightness = ARRAY_SIZE(backlight_map) - 1; err = bl_get(NULL); if (err < 0) goto err_free_backlight; diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index c9fc479fc290..ab5c9cea1462 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c @@ -600,6 +600,7 @@ static int acpi_pcc_hotkey_resume(struct acpi_device *device) static int acpi_pcc_hotkey_add(struct acpi_device *device) { + struct backlight_properties props; struct pcc_acpi *pcc; int num_sifr, result; @@ -637,24 +638,23 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) if (result) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error installing keyinput handler\n")); - goto out_sinf; + goto out_hotkey; } - /* initialize backlight */ - pcc->backlight = backlight_device_register("panasonic", NULL, pcc, - &pcc_backlight_ops); - if (IS_ERR(pcc->backlight)) - goto out_input; - if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Couldn't retrieve BIOS data\n")); - goto out_backlight; + goto out_input; } + /* initialize backlight */ + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT]; + pcc->backlight = backlight_device_register("panasonic", NULL, pcc, + &pcc_backlight_ops, &props); + if (IS_ERR(pcc->backlight)) + goto out_sinf; /* read the initial brightness setting from the hardware */ - pcc->backlight->props.max_brightness = - pcc->sinf[SINF_AC_MAX_BRIGHT]; pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT]; /* read the initial sticky key mode from the hardware */ @@ -669,12 +669,12 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) out_backlight: backlight_device_unregister(pcc->backlight); +out_sinf: + kfree(pcc->sinf); out_input: input_unregister_device(pcc->input_dev); /* no need to input_free_device() since core input API refcount and * free()s the device */ -out_sinf: - kfree(pcc->sinf); out_hotkey: kfree(pcc); diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 5a3d8514c66d..6553b91caaa4 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -1291,9 +1291,13 @@ static int sony_nc_add(struct acpi_device 
*device) "controlled by ACPI video driver\n"); } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT", &handle))) { + struct backlight_properties props; + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = SONY_MAX_BRIGHTNESS - 1; sony_backlight_device = backlight_device_register("sony", NULL, NULL, - &sony_backlight_ops); + &sony_backlight_ops, + &props); if (IS_ERR(sony_backlight_device)) { printk(KERN_WARNING DRV_PFX "unable to register backlight device\n"); @@ -1302,8 +1306,6 @@ static int sony_nc_add(struct acpi_device *device) sony_backlight_device->props.brightness = sony_backlight_get_brightness (sony_backlight_device); - sony_backlight_device->props.max_brightness = - SONY_MAX_BRIGHTNESS - 1; } } diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index c64e3528889b..770b85327f84 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -6170,6 +6170,7 @@ static const struct tpacpi_quirk brightness_quirk_table[] __initconst = { static int __init brightness_init(struct ibm_init_struct *iibm) { + struct backlight_properties props; int b; unsigned long quirks; @@ -6259,9 +6260,12 @@ static int __init brightness_init(struct ibm_init_struct *iibm) printk(TPACPI_INFO "detected a 16-level brightness capable ThinkPad\n"); - ibm_backlight_device = backlight_device_register( - TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL, - &ibm_backlight_data); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = (tp_features.bright_16levels) ? 15 : 7; + ibm_backlight_device = backlight_device_register(TPACPI_BACKLIGHT_DEV_NAME, + NULL, NULL, + &ibm_backlight_data, + &props); if (IS_ERR(ibm_backlight_device)) { int rc = PTR_ERR(ibm_backlight_device); ibm_backlight_device = NULL; @@ -6280,8 +6284,6 @@ static int __init brightness_init(struct ibm_init_struct *iibm) "or not on your ThinkPad\n", TPACPI_MAIL); } - ibm_backlight_device->props.max_brightness = - (tp_features.bright_16levels)? 
15 : 7; ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK; backlight_update_status(ibm_backlight_device); diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 789240d1b577..def4841183be 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -924,6 +924,7 @@ static int __init toshiba_acpi_init(void) u32 hci_result; bool bt_present; int ret = 0; + struct backlight_properties props; if (acpi_disabled) return -ENODEV; @@ -974,10 +975,12 @@ static int __init toshiba_acpi_init(void) } } + props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; toshiba_backlight_device = backlight_device_register("toshiba", - &toshiba_acpi.p_dev->dev, - NULL, - &toshiba_backlight_data); + &toshiba_acpi.p_dev->dev, + NULL, + &toshiba_backlight_data, + &props); if (IS_ERR(toshiba_backlight_device)) { ret = PTR_ERR(toshiba_backlight_device); @@ -986,7 +989,6 @@ static int __init toshiba_acpi_init(void) toshiba_acpi_exit(); return ret; } - toshiba_backlight_device->props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; /* Register rfkill switch for Bluetooth */ if (hci_get_bt_present(&bt_present) == HCI_SUCCESS && bt_present) { diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c index dd7ea4c075db..eb44b60e1eb5 100644 --- a/drivers/staging/samsung-laptop/samsung-laptop.c +++ b/drivers/staging/samsung-laptop/samsung-laptop.c @@ -394,6 +394,7 @@ MODULE_DEVICE_TABLE(dmi, samsung_dmi_table); static int __init samsung_init(void) { + struct backlight_properties props; struct sabi_retval sretval; const char *testStr = "SECLINUX"; void __iomem *memcheck; @@ -486,12 +487,14 @@ static int __init samsung_init(void) goto error_no_platform; /* create a backlight device to talk to this one */ + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = MAX_BRIGHT; backlight_device = backlight_device_register("samsung", &sdev->dev, - NULL, &backlight_ops); + NULL, &backlight_ops, + &props); if (IS_ERR(backlight_device)) goto error_no_backlight; - backlight_device->props.max_brightness = MAX_BRIGHT; backlight_device->props.brightness = read_brightness(); backlight_device->props.power = FB_BLANK_UNBLANK; backlight_update_status(backlight_device); diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index 4d2952f1fb13..3adab041355a 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c @@ -202,6 +202,7 @@ static void appledisplay_work(struct work_struct *work) static int appledisplay_probe(struct usb_interface *iface, const struct usb_device_id *id) { + struct backlight_properties props; struct appledisplay *pdata; struct usb_device *udev = interface_to_usbdev(iface); struct usb_host_interface *iface_desc; @@ -279,16 +280,16 @@ static int appledisplay_probe(struct usb_interface *iface, /* Register backlight device */ snprintf(bl_name, sizeof(bl_name), "appledisplay%d", atomic_inc_return(&count_displays) - 1); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 0xff; pdata->bd = backlight_device_register(bl_name, NULL, pdata, - &appledisplay_bl_data); + &appledisplay_bl_data, &props); if (IS_ERR(pdata->bd)) { dev_err(&iface->dev, "Backlight registration failed\n"); retval = PTR_ERR(pdata->bd); goto error; } - pdata->bd->props.max_brightness = 0xff; - /* Try to get brightness */ brightness = appledisplay_bl_get_brightness(pdata->bd); diff --git a/drivers/video/atmel_lcdfb.c 
b/drivers/video/atmel_lcdfb.c index 3d886c6902f9..11de3bfd4e54 100644 --- a/drivers/video/atmel_lcdfb.c +++ b/drivers/video/atmel_lcdfb.c @@ -117,6 +117,7 @@ static struct backlight_ops atmel_lcdc_bl_ops = { static void init_backlight(struct atmel_lcdfb_info *sinfo) { + struct backlight_properties props; struct backlight_device *bl; sinfo->bl_power = FB_BLANK_UNBLANK; @@ -124,8 +125,10 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo) if (sinfo->backlight) return; - bl = backlight_device_register("backlight", &sinfo->pdev->dev, - sinfo, &atmel_lcdc_bl_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 0xff; + bl = backlight_device_register("backlight", &sinfo->pdev->dev, sinfo, + &atmel_lcdc_bl_ops, &props); if (IS_ERR(bl)) { dev_err(&sinfo->pdev->dev, "error %ld on backlight register\n", PTR_ERR(bl)); @@ -135,7 +138,6 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo) bl->props.power = FB_BLANK_UNBLANK; bl->props.fb_blank = FB_BLANK_UNBLANK; - bl->props.max_brightness = 0xff; bl->props.brightness = atmel_bl_get_brightness(bl); } diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index 9ee67d6da710..a489be0c4614 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c @@ -1802,6 +1802,7 @@ static void aty128_bl_set_power(struct fb_info *info, int power) static void aty128_bl_init(struct aty128fb_par *par) { + struct backlight_properties props; struct fb_info *info = pci_get_drvdata(par->pdev); struct backlight_device *bd; char name[12]; @@ -1817,7 +1818,10 @@ static void aty128_bl_init(struct aty128fb_par *par) snprintf(name, sizeof(name), "aty128bl%d", info->node); - bd = backlight_device_register(name, info->dev, par, &aty128_bl_data); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = FB_BACKLIGHT_LEVELS - 1; + bd = backlight_device_register(name, info->dev, par, &aty128_bl_data, + &props); if (IS_ERR(bd)) { info->bl_dev = NULL; printk(KERN_WARNING "aty128: Backlight registration failed\n"); @@ -1829,7 +1833,6 @@ static void aty128_bl_init(struct aty128fb_par *par) 63 * FB_BACKLIGHT_MAX / MAX_LEVEL, 219 * FB_BACKLIGHT_MAX / MAX_LEVEL); - bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1; bd->props.brightness = bd->props.max_brightness; bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index e45ab8db2ddc..29d72851f85b 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c @@ -2232,6 +2232,7 @@ static struct backlight_ops aty_bl_data = { static void aty_bl_init(struct atyfb_par *par) { + struct backlight_properties props; struct fb_info *info = pci_get_drvdata(par->pdev); struct backlight_device *bd; char name[12]; @@ -2243,7 +2244,10 @@ static void aty_bl_init(struct atyfb_par *par) snprintf(name, sizeof(name), "atybl%d", info->node); - bd = backlight_device_register(name, info->dev, par, &aty_bl_data); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = FB_BACKLIGHT_LEVELS - 1; + bd = backlight_device_register(name, info->dev, par, &aty_bl_data, + &props); if (IS_ERR(bd)) { info->bl_dev = NULL; printk(KERN_WARNING "aty: Backlight registration failed\n"); @@ -2255,7 +2259,6 @@ static void aty_bl_init(struct atyfb_par *par) 0x3F * FB_BACKLIGHT_MAX / MAX_LEVEL, 0xFF * FB_BACKLIGHT_MAX / MAX_LEVEL); - bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1; bd->props.brightness = bd->props.max_brightness; 
bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c index fa1198c4ccc5..9fc8c66be3ce 100644 --- a/drivers/video/aty/radeon_backlight.c +++ b/drivers/video/aty/radeon_backlight.c @@ -134,6 +134,7 @@ static struct backlight_ops radeon_bl_data = { void radeonfb_bl_init(struct radeonfb_info *rinfo) { + struct backlight_properties props; struct backlight_device *bd; struct radeon_bl_privdata *pdata; char name[12]; @@ -155,7 +156,10 @@ void radeonfb_bl_init(struct radeonfb_info *rinfo) snprintf(name, sizeof(name), "radeonbl%d", rinfo->info->node); - bd = backlight_device_register(name, rinfo->info->dev, pdata, &radeon_bl_data); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = FB_BACKLIGHT_LEVELS - 1; + bd = backlight_device_register(name, rinfo->info->dev, pdata, + &radeon_bl_data, &props); if (IS_ERR(bd)) { rinfo->info->bl_dev = NULL; printk("radeonfb: Backlight registration failed\n"); @@ -185,7 +189,6 @@ void radeonfb_bl_init(struct radeonfb_info *rinfo) 63 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL, 217 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL); - bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1; bd->props.brightness = bd->props.max_brightness; bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c index b8f705cca438..93e25c77aeb2 100644 --- a/drivers/video/backlight/88pm860x_bl.c +++ b/drivers/video/backlight/88pm860x_bl.c @@ -187,6 +187,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev) struct pm860x_backlight_data *data; struct backlight_device *bl; struct resource *res; + struct backlight_properties props; unsigned char value; char name[MFD_NAME_SIZE]; int ret; @@ -223,14 +224,15 @@ static int pm860x_backlight_probe(struct platform_device *pdev) return -EINVAL; } + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = MAX_BRIGHTNESS; bl = backlight_device_register(name, &pdev->dev, data, - &pm860x_backlight_ops); + &pm860x_backlight_ops, &props); if (IS_ERR(bl)) { dev_err(&pdev->dev, "failed to register backlight\n"); kfree(data); return PTR_ERR(bl); } - bl->props.max_brightness = MAX_BRIGHTNESS; bl->props.brightness = MAX_BRIGHTNESS; platform_set_drvdata(pdev, bl); diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c index 86d95c228adb..5183f0e4d314 100644 --- a/drivers/video/backlight/adp5520_bl.c +++ b/drivers/video/backlight/adp5520_bl.c @@ -278,6 +278,7 @@ static const struct attribute_group adp5520_bl_attr_group = { static int __devinit adp5520_bl_probe(struct platform_device *pdev) { + struct backlight_properties props; struct backlight_device *bl; struct adp5520_bl *data; int ret = 0; @@ -300,17 +301,17 @@ static int __devinit adp5520_bl_probe(struct platform_device *pdev) mutex_init(&data->lock); - bl = backlight_device_register(pdev->name, data->master, - data, &adp5520_bl_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = ADP5020_MAX_BRIGHTNESS; + bl = backlight_device_register(pdev->name, data->master, data, + &adp5520_bl_ops, &props); if (IS_ERR(bl)) { dev_err(&pdev->dev, "failed to register backlight\n"); kfree(data); return PTR_ERR(bl); } - bl->props.max_brightness = - bl->props.brightness = ADP5020_MAX_BRIGHTNESS; - + bl->props.brightness = ADP5020_MAX_BRIGHTNESS; if (data->pdata->en_ambl_sens) ret = 
sysfs_create_group(&bl->dev.kobj, &adp5520_bl_attr_group); diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c index a683dd1be4bc..b0624b983889 100644 --- a/drivers/video/backlight/adx_bl.c +++ b/drivers/video/backlight/adx_bl.c @@ -70,6 +70,7 @@ static const struct backlight_ops adx_backlight_ops = { static int __devinit adx_backlight_probe(struct platform_device *pdev) { + struct backlight_properties props; struct backlight_device *bldev; struct resource *res; struct adxbl *bl; @@ -101,14 +102,15 @@ static int __devinit adx_backlight_probe(struct platform_device *pdev) goto out; } - bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, bl, - &adx_backlight_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 0xff; + bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, + bl, &adx_backlight_ops, &props); if (!bldev) { ret = -ENOMEM; goto out; } - bldev->props.max_brightness = 0xff; bldev->props.brightness = 0xff; bldev->props.power = FB_BLANK_UNBLANK; diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c index f625ffc69ad3..2d9760551a4b 100644 --- a/drivers/video/backlight/atmel-pwm-bl.c +++ b/drivers/video/backlight/atmel-pwm-bl.c @@ -120,6 +120,7 @@ static const struct backlight_ops atmel_pwm_bl_ops = { static int atmel_pwm_bl_probe(struct platform_device *pdev) { + struct backlight_properties props; const struct atmel_pwm_bl_platform_data *pdata; struct backlight_device *bldev; struct atmel_pwm_bl *pwmbl; @@ -165,8 +166,10 @@ static int atmel_pwm_bl_probe(struct platform_device *pdev) goto err_free_gpio; } - bldev = backlight_device_register("atmel-pwm-bl", - &pdev->dev, pwmbl, &atmel_pwm_bl_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min; + bldev = backlight_device_register("atmel-pwm-bl", &pdev->dev, pwmbl, + &atmel_pwm_bl_ops, &props); if (IS_ERR(bldev)) { retval = PTR_ERR(bldev); goto err_free_gpio; @@ -178,7 +181,6 @@ static int atmel_pwm_bl_probe(struct platform_device *pdev) /* Power up the backlight by default at middle intesity. */ bldev->props.power = FB_BLANK_UNBLANK; - bldev->props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min; bldev->props.brightness = bldev->props.max_brightness / 2; retval = atmel_pwm_bl_init_pwm(pwmbl); diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index b800cd4eeec8..68bb838b9f11 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -269,7 +269,8 @@ EXPORT_SYMBOL(backlight_force_update); * ERR_PTR() or a pointer to the newly allocated device. 
*/ struct backlight_device *backlight_device_register(const char *name, - struct device *parent, void *devdata, const struct backlight_ops *ops) + struct device *parent, void *devdata, const struct backlight_ops *ops, + const struct backlight_properties *props) { struct backlight_device *new_bd; int rc; @@ -289,6 +290,11 @@ struct backlight_device *backlight_device_register(const char *name, dev_set_name(&new_bd->dev, name); dev_set_drvdata(&new_bd->dev, devdata); + /* Set default properties */ + if (props) + memcpy(&new_bd->props, props, + sizeof(struct backlight_properties)); + rc = device_register(&new_bd->dev); if (rc) { kfree(new_bd); diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c index b4bcf8043797..73bdd8454c94 100644 --- a/drivers/video/backlight/corgi_lcd.c +++ b/drivers/video/backlight/corgi_lcd.c @@ -533,6 +533,7 @@ err_free_backlight_on: static int __devinit corgi_lcd_probe(struct spi_device *spi) { + struct backlight_properties props; struct corgi_lcd_platform_data *pdata = spi->dev.platform_data; struct corgi_lcd *lcd; int ret = 0; @@ -559,13 +560,14 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi) lcd->power = FB_BLANK_POWERDOWN; lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA; - lcd->bl_dev = backlight_device_register("corgi_bl", &spi->dev, - lcd, &corgi_bl_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = pdata->max_intensity; + lcd->bl_dev = backlight_device_register("corgi_bl", &spi->dev, lcd, + &corgi_bl_ops, &props); if (IS_ERR(lcd->bl_dev)) { ret = PTR_ERR(lcd->bl_dev); goto err_unregister_lcd; } - lcd->bl_dev->props.max_brightness = pdata->max_intensity; lcd->bl_dev->props.brightness = pdata->default_intensity; lcd->bl_dev->props.power = FB_BLANK_UNBLANK; diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c index da86db4374a0..1cce6031bff2 100644 --- a/drivers/video/backlight/cr_bllcd.c +++ b/drivers/video/backlight/cr_bllcd.c @@ -170,6 +170,7 @@ static struct lcd_ops cr_lcd_ops = { static int cr_backlight_probe(struct platform_device *pdev) { + struct backlight_properties props; struct backlight_device *bdp; struct lcd_device *ldp; struct cr_panel *crp; @@ -190,8 +191,9 @@ static int cr_backlight_probe(struct platform_device *pdev) return -ENODEV; } - bdp = backlight_device_register("cr-backlight", - &pdev->dev, NULL, &cr_backlight_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + bdp = backlight_device_register("cr-backlight", &pdev->dev, NULL, + &cr_backlight_ops, &props); if (IS_ERR(bdp)) { pci_dev_put(lpc_dev); return PTR_ERR(bdp); @@ -220,9 +222,7 @@ static int cr_backlight_probe(struct platform_device *pdev) crp->cr_lcd_device = ldp; crp->cr_backlight_device->props.power = FB_BLANK_UNBLANK; crp->cr_backlight_device->props.brightness = 0; - crp->cr_backlight_device->props.max_brightness = 0; cr_backlight_set_intensity(crp->cr_backlight_device); - cr_lcd_set_power(crp->cr_lcd_device, FB_BLANK_UNBLANK); platform_set_drvdata(pdev, crp); diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c index 74cdc640173d..686e4a789238 100644 --- a/drivers/video/backlight/da903x_bl.c +++ b/drivers/video/backlight/da903x_bl.c @@ -105,6 +105,7 @@ static int da903x_backlight_probe(struct platform_device *pdev) struct da9034_backlight_pdata *pdata = pdev->dev.platform_data; struct da903x_backlight_data *data; struct backlight_device *bl; + struct backlight_properties props; int max_brightness; data 
= kzalloc(sizeof(*data), GFP_KERNEL); @@ -134,15 +135,15 @@ static int da903x_backlight_probe(struct platform_device *pdev) da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2, DA9034_WLED_ISET(pdata->output_current)); - bl = backlight_device_register(pdev->name, data->da903x_dev, - data, &da903x_backlight_ops); + props.max_brightness = max_brightness; + bl = backlight_device_register(pdev->name, data->da903x_dev, data, + &da903x_backlight_ops, &props); if (IS_ERR(bl)) { dev_err(&pdev->dev, "failed to register backlight\n"); kfree(data); return PTR_ERR(bl); } - bl->props.max_brightness = max_brightness; bl->props.brightness = max_brightness; platform_set_drvdata(pdev, bl); diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c index e6d348e63596..312ca619735d 100644 --- a/drivers/video/backlight/generic_bl.c +++ b/drivers/video/backlight/generic_bl.c @@ -78,6 +78,7 @@ static const struct backlight_ops genericbl_ops = { static int genericbl_probe(struct platform_device *pdev) { + struct backlight_properties props; struct generic_bl_info *machinfo = pdev->dev.platform_data; const char *name = "generic-bl"; struct backlight_device *bd; @@ -89,14 +90,15 @@ static int genericbl_probe(struct platform_device *pdev) if (machinfo->name) name = machinfo->name; - bd = backlight_device_register (name, - &pdev->dev, NULL, &genericbl_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = machinfo->max_intensity; + bd = backlight_device_register(name, &pdev->dev, NULL, &genericbl_ops, + &props); if (IS_ERR (bd)) return PTR_ERR (bd); platform_set_drvdata(pdev, bd); - bd->props.max_brightness = machinfo->max_intensity; bd->props.power = FB_BLANK_UNBLANK; bd->props.brightness = machinfo->default_intensity; backlight_update_status(bd); diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c index f7cc528d5be7..267d23f8d645 100644 --- a/drivers/video/backlight/hp680_bl.c +++ b/drivers/video/backlight/hp680_bl.c @@ -105,16 +105,18 @@ static const struct backlight_ops hp680bl_ops = { static int __devinit hp680bl_probe(struct platform_device *pdev) { + struct backlight_properties props; struct backlight_device *bd; - bd = backlight_device_register ("hp680-bl", &pdev->dev, NULL, - &hp680bl_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = HP680_MAX_INTENSITY; + bd = backlight_device_register("hp680-bl", &pdev->dev, NULL, + &hp680bl_ops, &props); if (IS_ERR(bd)) return PTR_ERR(bd); platform_set_drvdata(pdev, bd); - bd->props.max_brightness = HP680_MAX_INTENSITY; bd->props.brightness = HP680_DEFAULT_INTENSITY; hp680bl_send_intensity(bd); diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c index db9071fc5665..2f177b3a4885 100644 --- a/drivers/video/backlight/jornada720_bl.c +++ b/drivers/video/backlight/jornada720_bl.c @@ -101,10 +101,14 @@ static const struct backlight_ops jornada_bl_ops = { static int jornada_bl_probe(struct platform_device *pdev) { + struct backlight_properties props; int ret; struct backlight_device *bd; - bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL, &jornada_bl_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = BL_MAX_BRIGHT; + bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL, + &jornada_bl_ops, &props); if (IS_ERR(bd)) { ret = PTR_ERR(bd); @@ -117,7 +121,6 @@ static int jornada_bl_probe(struct platform_device *pdev) /* note. 
make sure max brightness is set otherwise you will get seemingly non-related errors when trying to change brightness */ - bd->props.max_brightness = BL_MAX_BRIGHT; jornada_bl_update_status(bd); platform_set_drvdata(pdev, bd); diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c index 939e7b830cf3..f439a8632287 100644 --- a/drivers/video/backlight/kb3886_bl.c +++ b/drivers/video/backlight/kb3886_bl.c @@ -141,20 +141,24 @@ static const struct backlight_ops kb3886bl_ops = { static int kb3886bl_probe(struct platform_device *pdev) { + struct backlight_properties props; struct kb3886bl_machinfo *machinfo = pdev->dev.platform_data; bl_machinfo = machinfo; if (!machinfo->limit_mask) machinfo->limit_mask = -1; + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = machinfo->max_intensity; kb3886_backlight_device = backlight_device_register("kb3886-bl", - &pdev->dev, NULL, &kb3886bl_ops); + &pdev->dev, NULL, + &kb3886bl_ops, + &props); if (IS_ERR(kb3886_backlight_device)) return PTR_ERR(kb3886_backlight_device); platform_set_drvdata(pdev, kb3886_backlight_device); - kb3886_backlight_device->props.max_brightness = machinfo->max_intensity; kb3886_backlight_device->props.power = FB_BLANK_UNBLANK; kb3886_backlight_device->props.brightness = machinfo->default_intensity; backlight_update_status(kb3886_backlight_device); diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c index 00a9591b0003..7571bc26071e 100644 --- a/drivers/video/backlight/locomolcd.c +++ b/drivers/video/backlight/locomolcd.c @@ -167,6 +167,7 @@ static int locomolcd_resume(struct locomo_dev *dev) static int locomolcd_probe(struct locomo_dev *ldev) { + struct backlight_properties props; unsigned long flags; local_irq_save(flags); @@ -182,13 +183,16 @@ static int locomolcd_probe(struct locomo_dev *ldev) local_irq_restore(flags); - locomolcd_bl_device = backlight_device_register("locomo-bl", &ldev->dev, NULL, &locomobl_data); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 4; + locomolcd_bl_device = backlight_device_register("locomo-bl", + &ldev->dev, NULL, + &locomobl_data, &props); if (IS_ERR (locomolcd_bl_device)) return PTR_ERR (locomolcd_bl_device); /* Set up frontlight so that screen is readable */ - locomolcd_bl_device->props.max_brightness = 4, locomolcd_bl_device->props.brightness = 2; locomolcd_set_intensity(locomolcd_bl_device); diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c index c267069a52a3..c91adaf492cf 100644 --- a/drivers/video/backlight/max8925_bl.c +++ b/drivers/video/backlight/max8925_bl.c @@ -104,6 +104,7 @@ static int __devinit max8925_backlight_probe(struct platform_device *pdev) struct max8925_backlight_pdata *pdata = NULL; struct max8925_backlight_data *data; struct backlight_device *bl; + struct backlight_properties props; struct resource *res; char name[MAX8925_NAME_SIZE]; unsigned char value; @@ -133,14 +134,15 @@ static int __devinit max8925_backlight_probe(struct platform_device *pdev) data->chip = chip; data->current_brightness = 0; + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = MAX_BRIGHTNESS; bl = backlight_device_register(name, &pdev->dev, data, - &max8925_backlight_ops); + &max8925_backlight_ops, &props); if (IS_ERR(bl)) { dev_err(&pdev->dev, "failed to register backlight\n"); kfree(data); return PTR_ERR(bl); } - bl->props.max_brightness = MAX_BRIGHTNESS; bl->props.brightness = MAX_BRIGHTNESS; 
platform_set_drvdata(pdev, bl); diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c index 2e78b0784bdc..0881358eeace 100644 --- a/drivers/video/backlight/mbp_nvidia_bl.c +++ b/drivers/video/backlight/mbp_nvidia_bl.c @@ -250,6 +250,7 @@ static const struct dmi_system_id __initdata mbp_device_table[] = { static int __init mbp_init(void) { + struct backlight_properties props; if (!dmi_check_system(mbp_device_table)) return -ENODEV; @@ -257,14 +258,17 @@ static int __init mbp_init(void) "Macbook Pro backlight")) return -ENXIO; - mbp_backlight_device = backlight_device_register("mbp_backlight", - NULL, NULL, &driver_data->backlight_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 15; + mbp_backlight_device = backlight_device_register("mbp_backlight", NULL, + NULL, + &driver_data->backlight_ops, + &props); if (IS_ERR(mbp_backlight_device)) { release_region(driver_data->iostart, driver_data->iolen); return PTR_ERR(mbp_backlight_device); } - mbp_backlight_device->props.max_brightness = 15; mbp_backlight_device->props.brightness = driver_data->backlight_ops.get_brightness(mbp_backlight_device); backlight_update_status(mbp_backlight_device); diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c index a3a7f8938175..333d28e6b062 100644 --- a/drivers/video/backlight/omap1_bl.c +++ b/drivers/video/backlight/omap1_bl.c @@ -132,6 +132,7 @@ static const struct backlight_ops omapbl_ops = { static int omapbl_probe(struct platform_device *pdev) { + struct backlight_properties props; struct backlight_device *dev; struct omap_backlight *bl; struct omap_backlight_config *pdata = pdev->dev.platform_data; @@ -143,7 +144,10 @@ static int omapbl_probe(struct platform_device *pdev) if (unlikely(!bl)) return -ENOMEM; - dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = OMAPBL_MAX_INTENSITY; + dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops, + &props); if (IS_ERR(dev)) { kfree(bl); return PTR_ERR(dev); @@ -160,7 +164,6 @@ static int omapbl_probe(struct platform_device *pdev) omap_cfg_reg(PWL); /* Conflicts with UART3 */ dev->props.fb_blank = FB_BLANK_UNBLANK; - dev->props.max_brightness = OMAPBL_MAX_INTENSITY; dev->props.brightness = pdata->default_intensity; omapbl_update_status(dev); diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c index 2ec16deb2397..809278c90738 100644 --- a/drivers/video/backlight/progear_bl.c +++ b/drivers/video/backlight/progear_bl.c @@ -61,6 +61,7 @@ static const struct backlight_ops progearbl_ops = { static int progearbl_probe(struct platform_device *pdev) { + struct backlight_properties props; u8 temp; struct backlight_device *progear_backlight_device; int ret; @@ -82,9 +83,12 @@ static int progearbl_probe(struct platform_device *pdev) pci_read_config_byte(sb_dev, SB_MPS1, &temp); pci_write_config_byte(sb_dev, SB_MPS1, temp | 0x20); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = HW_LEVEL_MAX - HW_LEVEL_MIN; progear_backlight_device = backlight_device_register("progear-bl", &pdev->dev, NULL, - &progearbl_ops); + &progearbl_ops, + &props); if (IS_ERR(progear_backlight_device)) { ret = PTR_ERR(progear_backlight_device); goto put_sb; @@ -94,7 +98,6 @@ static int progearbl_probe(struct platform_device *pdev) progear_backlight_device->props.power = FB_BLANK_UNBLANK; 
progear_backlight_device->props.brightness = HW_LEVEL_MAX - HW_LEVEL_MIN; - progear_backlight_device->props.max_brightness = HW_LEVEL_MAX - HW_LEVEL_MIN; progearbl_set_intensity(progear_backlight_device); return 0; diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 9d2ec2a1cce8..b89eebc3f77d 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -65,6 +65,7 @@ static const struct backlight_ops pwm_backlight_ops = { static int pwm_backlight_probe(struct platform_device *pdev) { + struct backlight_properties props; struct platform_pwm_backlight_data *data = pdev->dev.platform_data; struct backlight_device *bl; struct pwm_bl_data *pb; @@ -100,15 +101,16 @@ static int pwm_backlight_probe(struct platform_device *pdev) } else dev_dbg(&pdev->dev, "got pwm for backlight\n"); - bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, - pb, &pwm_backlight_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = data->max_brightness; + bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, pb, + &pwm_backlight_ops, &props); if (IS_ERR(bl)) { dev_err(&pdev->dev, "failed to register backlight\n"); ret = PTR_ERR(bl); goto err_bl; } - bl->props.max_brightness = data->max_brightness; bl->props.brightness = data->dft_brightness; backlight_update_status(bl); diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c index e14ce4d469f5..f57bbf170049 100644 --- a/drivers/video/backlight/tosa_bl.c +++ b/drivers/video/backlight/tosa_bl.c @@ -80,6 +80,7 @@ static const struct backlight_ops bl_ops = { static int __devinit tosa_bl_probe(struct i2c_client *client, const struct i2c_device_id *id) { + struct backlight_properties props; struct tosa_bl_data *data = kzalloc(sizeof(struct tosa_bl_data), GFP_KERNEL); int ret = 0; if (!data) @@ -99,15 +100,16 @@ static int __devinit tosa_bl_probe(struct i2c_client *client, i2c_set_clientdata(client, data); data->i2c = client; - data->bl = backlight_device_register("tosa-bl", &client->dev, - data, &bl_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 512 - 1; + data->bl = backlight_device_register("tosa-bl", &client->dev, data, + &bl_ops, &props); if (IS_ERR(data->bl)) { ret = PTR_ERR(data->bl); goto err_reg; } data->bl->props.brightness = 69; - data->bl->props.max_brightness = 512 - 1; data->bl->props.power = FB_BLANK_UNBLANK; backlight_update_status(data->bl); diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c index e32add37a203..a4312709fb1b 100644 --- a/drivers/video/backlight/wm831x_bl.c +++ b/drivers/video/backlight/wm831x_bl.c @@ -125,6 +125,7 @@ static int wm831x_backlight_probe(struct platform_device *pdev) struct wm831x_backlight_pdata *pdata; struct wm831x_backlight_data *data; struct backlight_device *bl; + struct backlight_properties props; int ret, i, max_isel, isink_reg, dcdc_cfg; /* We need platform data */ @@ -191,15 +192,15 @@ static int wm831x_backlight_probe(struct platform_device *pdev) data->current_brightness = 0; data->isink_reg = isink_reg; - bl = backlight_device_register("wm831x", &pdev->dev, - data, &wm831x_backlight_ops); + props.max_brightness = max_isel; + bl = backlight_device_register("wm831x", &pdev->dev, data, + &wm831x_backlight_ops, &props); if (IS_ERR(bl)) { dev_err(&pdev->dev, "failed to register backlight\n"); kfree(data); return PTR_ERR(bl); } - bl->props.max_brightness = max_isel; bl->props.brightness = max_isel; 
platform_set_drvdata(pdev, bl); diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c index 814312a7452f..54df3d44af8f 100644 --- a/drivers/video/bf54x-lq043fb.c +++ b/drivers/video/bf54x-lq043fb.c @@ -501,6 +501,7 @@ static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id) static int __devinit bfin_bf54x_probe(struct platform_device *pdev) { + struct backlight_properties props; struct bfin_bf54xfb_info *info; struct fb_info *fbinfo; int ret; @@ -645,10 +646,10 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev) goto out8; } #ifndef NO_BL_SUPPORT - bl_dev = - backlight_device_register("bf54x-bl", NULL, NULL, - &bfin_lq043fb_bl_ops); - bl_dev->props.max_brightness = 255; + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 255; + bl_dev = backlight_device_register("bf54x-bl", NULL, NULL, + &bfin_lq043fb_bl_ops, &props); lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops); lcd_dev->props.max_contrast = 255, printk(KERN_INFO "Done.\n"); diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c index 5653d083a983..3a8e811a7e9f 100644 --- a/drivers/video/bfin-t350mcqb-fb.c +++ b/drivers/video/bfin-t350mcqb-fb.c @@ -419,6 +419,7 @@ static irqreturn_t bfin_t350mcqb_irq_error(int irq, void *dev_id) static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) { + struct backlight_properties props; struct bfin_t350mcqbfb_info *info; struct fb_info *fbinfo; int ret; @@ -540,10 +541,10 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) goto out8; } #ifndef NO_BL_SUPPORT - bl_dev = - backlight_device_register("bf52x-bl", NULL, NULL, - &bfin_lq043fb_bl_ops); - bl_dev->props.max_brightness = 255; + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = 255; + bl_dev = backlight_device_register("bf52x-bl", NULL, NULL, + &bfin_lq043fb_bl_ops, &props); lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops); lcd_dev->props.max_contrast = 255, printk(KERN_INFO "Done.\n"); diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c index 443e3c85a9a0..2fb552a6f32c 100644 --- a/drivers/video/nvidia/nv_backlight.c +++ b/drivers/video/nvidia/nv_backlight.c @@ -94,6 +94,7 @@ static struct backlight_ops nvidia_bl_ops = { void nvidia_bl_init(struct nvidia_par *par) { + struct backlight_properties props; struct fb_info *info = pci_get_drvdata(par->pci_dev); struct backlight_device *bd; char name[12]; @@ -109,7 +110,10 @@ void nvidia_bl_init(struct nvidia_par *par) snprintf(name, sizeof(name), "nvidiabl%d", info->node); - bd = backlight_device_register(name, info->dev, par, &nvidia_bl_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = FB_BACKLIGHT_LEVELS - 1; + bd = backlight_device_register(name, info->dev, par, &nvidia_bl_ops, + &props); if (IS_ERR(bd)) { info->bl_dev = NULL; printk(KERN_WARNING "nvidia: Backlight registration failed\n"); @@ -121,7 +125,6 @@ void nvidia_bl_init(struct nvidia_par *par) 0x158 * FB_BACKLIGHT_MAX / MAX_LEVEL, 0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL); - bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1; bd->props.brightness = bd->props.max_brightness; bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c index fcd6a61a91eb..59769e85d41c 100644 --- a/drivers/video/omap2/displays/panel-taal.c +++ 
b/drivers/video/omap2/displays/panel-taal.c @@ -486,6 +486,7 @@ static struct attribute_group taal_attr_group = { static int taal_probe(struct omap_dss_device *dssdev) { + struct backlight_properties props; struct taal_data *td; struct backlight_device *bldev; int r; @@ -520,11 +521,16 @@ static int taal_probe(struct omap_dss_device *dssdev) /* if no platform set_backlight() defined, presume DSI backlight * control */ + memset(&props, 0, sizeof(struct backlight_properties)); if (!dssdev->set_backlight) td->use_dsi_bl = true; + if (td->use_dsi_bl) + props.max_brightness = 255; + else + props.max_brightness = 127; bldev = backlight_device_register("taal", &dssdev->dev, dssdev, - &taal_bl_ops); + &taal_bl_ops, &props); if (IS_ERR(bldev)) { r = PTR_ERR(bldev); goto err2; @@ -534,13 +540,10 @@ static int taal_probe(struct omap_dss_device *dssdev) bldev->props.fb_blank = FB_BLANK_UNBLANK; bldev->props.power = FB_BLANK_UNBLANK; - if (td->use_dsi_bl) { - bldev->props.max_brightness = 255; + if (td->use_dsi_bl) bldev->props.brightness = 255; - } else { - bldev->props.max_brightness = 127; + else bldev->props.brightness = 127; - } taal_bl_update_status(bldev); diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c index d94c57ffbdb1..618f36bec10d 100644 --- a/drivers/video/riva/fbdev.c +++ b/drivers/video/riva/fbdev.c @@ -338,6 +338,7 @@ static struct backlight_ops riva_bl_ops = { static void riva_bl_init(struct riva_par *par) { + struct backlight_properties props; struct fb_info *info = pci_get_drvdata(par->pdev); struct backlight_device *bd; char name[12]; @@ -353,7 +354,10 @@ static void riva_bl_init(struct riva_par *par) snprintf(name, sizeof(name), "rivabl%d", info->node); - bd = backlight_device_register(name, info->dev, par, &riva_bl_ops); + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = FB_BACKLIGHT_LEVELS - 1; + bd = backlight_device_register(name, info->dev, par, &riva_bl_ops, + &props); if (IS_ERR(bd)) { info->bl_dev = NULL; printk(KERN_WARNING "riva: Backlight registration failed\n"); @@ -365,7 +369,6 @@ static void riva_bl_init(struct riva_par *par) MIN_LEVEL * FB_BACKLIGHT_MAX / MAX_LEVEL, FB_BACKLIGHT_MAX); - bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1; bd->props.brightness = bd->props.max_brightness; bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 21cd866d24cd..4a3d52e545e1 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -103,7 +103,8 @@ static inline void backlight_update_status(struct backlight_device *bd) } extern struct backlight_device *backlight_device_register(const char *name, - struct device *dev, void *devdata, const struct backlight_ops *ops); + struct device *dev, void *devdata, const struct backlight_ops *ops, + const struct backlight_properties *props); extern void backlight_device_unregister(struct backlight_device *bd); extern void backlight_force_update(struct backlight_device *bd, enum backlight_update_reason reason); -- cgit v1.2.3 From 936034026280facd7050c96c3b28339f28b09cdd Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Sun, 10 Jan 2010 13:27:54 +0100 Subject: leds: ALIX2: Add dependency to !GPIO_CS5335 The ALIX2 LED driver and the CS5535 GPIO drivers share the same I/O range which causes a conflict if they're both enabled. Fix this for now by adding Kconfig dependencies. While at it, also drop the EXPERIMENTAL flag, as the code has been around for awhile already. Note that this is a hack. 
At some point, a real platform support for this board should be added which handles the LEDs via the leds-gpio driver. Signed-off-by: Daniel Mack Signed-off-by: Richard Purdie --- drivers/leds/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index e0b64312e66a..023eda6dfdb6 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -79,7 +79,7 @@ config LEDS_WRAP config LEDS_ALIX2 tristate "LED Support for ALIX.2 and ALIX.3 series" - depends on LEDS_CLASS && X86 && EXPERIMENTAL + depends on LEDS_CLASS && X86 && !GPIO_CS5535 && !CS5535_GPIO help This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs. You have to set leds-alix2.force=1 for boards with Award BIOS. -- cgit v1.2.3 From 5e89a3484dea8a3d962f83fe497d064fbcde4e55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20N=C3=A9meth?= Date: Sun, 10 Jan 2010 01:11:03 +0100 Subject: leds: make PCI device id constant MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The id_table field of the struct pci_driver is constant in so it is worth to make pci_device_id also constant. The semantic match that finds this kind of pattern is as follows: (http://coccinelle.lip6.fr/) // @r@ disable decl_init,const_decl_init; identifier I1, I2, x; @@ struct I1 { ... const struct I2 *x; ... }; @s@ identifier r.I1, y; identifier r.x, E; @@ struct I1 y = { .x = E, }; @c@ identifier r.I2; identifier s.E; @@ const struct I2 E[] = ... ; @depends on !c@ identifier r.I2; identifier s.E; @@ + const struct I2 E[] = ...; // Signed-off-by: Márton Németh Signed-off-by: Richard Purdie --- drivers/leds/leds-ss4200.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c index 97f04984c1ca..51477ec71391 100644 --- a/drivers/leds/leds-ss4200.c +++ b/drivers/leds/leds-ss4200.c @@ -63,7 +63,7 @@ MODULE_LICENSE("GPL"); /* * PCI ID of the Intel ICH7 LPC Device within which the GPIO block lives. */ -static struct pci_device_id ich7_lpc_pci_id[] = +static const struct pci_device_id ich7_lpc_pci_id[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1) }, -- cgit v1.2.3 From bb9b6ef70f08f256ab4b8ec127c17ee629b85350 Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Wed, 6 Jan 2010 15:34:55 -0700 Subject: leds: led-class.c - Quiet boot messages As each led device gets registered a kernel message gets printed. In an embedded system with a number of leds this can produce a lot of output that just looks like noise. Change the message type to KERN_DEBUG since it might be useful in the dmesg output "after" booting. 
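For illustration only, a minimal hypothetical LED driver registration -- the kind of call site that emits this message -- looks roughly like the sketch below; the device name and trivial brightness handler are assumptions, not taken from any driver in this series. With the change, the line still shows up in dmesg but no longer reaches the console at the default loglevel:

	#include <linux/module.h>
	#include <linux/leds.h>

	static void example_led_set(struct led_classdev *cdev,
				    enum led_brightness value)
	{
		/* drive the hardware here */
	}

	static struct led_classdev example_led = {
		.name		= "example::status",	/* hypothetical name */
		.max_brightness	= 1,
		.brightness_set	= example_led_set,
	};

	static int __init example_led_init(void)
	{
		/* led_classdev_register() prints the message patched above */
		return led_classdev_register(NULL, &example_led);
	}

	static void __exit example_led_exit(void)
	{
		led_classdev_unregister(&example_led);
	}

	module_init(example_led_init);
	module_exit(example_led_exit);
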
Signed-off-by: H Hartley Sweeten Signed-off-by: Richard Purdie --- drivers/leds/led-class.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 782f95822eab..349e07350144 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -164,7 +164,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) led_trigger_set_default(led_cdev); #endif - printk(KERN_INFO "Registered led device: %s\n", + printk(KERN_DEBUG "Registered led device: %s\n", led_cdev->name); return 0; -- cgit v1.2.3 From d09e16664be88dc8463fe7508a2123460bf6d676 Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Wed, 20 Jan 2010 16:08:30 -0700 Subject: leds: Kconfig cleanup Remove the need for "depends on LEDS_CLASS" by wrapping the affected config options in an if/endif block. Similar for "depends on LEDS_TRIGGERS". LEDS_COBALT_RAQ still has a "depends on LEDS_CLASS=y" since it cannot be selected to build as a module. Signed-off-by: H Hartley Sweeten Signed-off-by: Richard Purdie --- drivers/leds/Kconfig | 75 +++++++++++++++++++++++++++------------------------- 1 file changed, 39 insertions(+), 36 deletions(-) diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 023eda6dfdb6..d86e1a3ee0b7 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -15,6 +15,8 @@ config LEDS_CLASS This option enables the led sysfs class in /sys/class/leds. You'll need this to do anything useful with LEDs. If unsure, say N. +if LEDS_CLASS + comment "LED drivers" config LEDS_88PM860X @@ -26,73 +28,73 @@ config LEDS_88PM860X config LEDS_ATMEL_PWM tristate "LED Support using Atmel PWM outputs" - depends on LEDS_CLASS && ATMEL_PWM + depends on ATMEL_PWM help This option enables support for LEDs driven using outputs of the dedicated PWM controller found on newer Atmel SOCs. config LEDS_LOCOMO tristate "LED Support for Locomo device" - depends on LEDS_CLASS && SHARP_LOCOMO + depends on SHARP_LOCOMO help This option enables support for the LEDs on Sharp Locomo. Zaurus models SL-5500 and SL-5600. config LEDS_MIKROTIK_RB532 tristate "LED Support for Mikrotik Routerboard 532" - depends on LEDS_CLASS && MIKROTIK_RB532 + depends on MIKROTIK_RB532 help This option enables support for the so called "User LED" of Mikrotik's Routerboard 532. config LEDS_S3C24XX tristate "LED Support for Samsung S3C24XX GPIO LEDs" - depends on LEDS_CLASS && ARCH_S3C2410 + depends on ARCH_S3C2410 help This option enables support for LEDs connected to GPIO lines on Samsung S3C24XX series CPUs, such as the S3C2410 and S3C2440. config LEDS_AMS_DELTA tristate "LED Support for the Amstrad Delta (E3)" - depends on LEDS_CLASS && MACH_AMS_DELTA + depends on MACH_AMS_DELTA help This option enables support for the LEDs on Amstrad Delta (E3). config LEDS_NET48XX tristate "LED Support for Soekris net48xx series Error LED" - depends on LEDS_CLASS && SCx200_GPIO + depends on SCx200_GPIO help This option enables support for the Soekris net4801 and net4826 error LED. config LEDS_FSG tristate "LED Support for the Freecom FSG-3" - depends on LEDS_CLASS && MACH_FSG + depends on MACH_FSG help This option enables support for the LEDs on the Freecom FSG-3. config LEDS_WRAP tristate "LED Support for the WRAP series LEDs" - depends on LEDS_CLASS && SCx200_GPIO + depends on SCx200_GPIO help This option enables support for the PCEngines WRAP programmable LEDs. 
config LEDS_ALIX2 tristate "LED Support for ALIX.2 and ALIX.3 series" - depends on LEDS_CLASS && X86 && !GPIO_CS5535 && !CS5535_GPIO + depends on X86 && !GPIO_CS5535 && !CS5535_GPIO help This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs. You have to set leds-alix2.force=1 for boards with Award BIOS. config LEDS_H1940 tristate "LED Support for iPAQ H1940 device" - depends on LEDS_CLASS && ARCH_H1940 + depends on ARCH_H1940 help This option enables support for the LEDs on the h1940. config LEDS_COBALT_QUBE tristate "LED Support for the Cobalt Qube series front LED" - depends on LEDS_CLASS && MIPS_COBALT + depends on MIPS_COBALT help This option enables support for the front LED on Cobalt Qube series @@ -105,7 +107,7 @@ config LEDS_COBALT_RAQ config LEDS_SUNFIRE tristate "LED support for SunFire servers." - depends on LEDS_CLASS && SPARC64 + depends on SPARC64 select LEDS_TRIGGERS help This option enables support for the Left, Middle, and Right @@ -113,14 +115,14 @@ config LEDS_SUNFIRE config LEDS_HP6XX tristate "LED Support for the HP Jornada 6xx" - depends on LEDS_CLASS && SH_HP6XX + depends on SH_HP6XX help This option enables LED support for the handheld HP Jornada 620/660/680/690. config LEDS_PCA9532 tristate "LED driver for PCA9532 dimmer" - depends on LEDS_CLASS && I2C && INPUT && EXPERIMENTAL + depends on I2C && INPUT && EXPERIMENTAL help This option enables support for NXP pca9532 LED controller. It is generally only useful @@ -128,7 +130,7 @@ config LEDS_PCA9532 config LEDS_GPIO tristate "LED Support for GPIO connected LEDs" - depends on LEDS_CLASS && GENERIC_GPIO + depends on GENERIC_GPIO help This option enables support for the LEDs connected to GPIO outputs. To be useful the particular board must have LEDs @@ -155,7 +157,7 @@ config LEDS_GPIO_OF config LEDS_LP3944 tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip" - depends on LEDS_CLASS && I2C + depends on I2C help This option enables support for LEDs connected to the National Semiconductor LP3944 Lighting Management Unit (LMU) also known as @@ -166,7 +168,7 @@ config LEDS_LP3944 config LEDS_CLEVO_MAIL tristate "Mail LED on Clevo notebook" - depends on LEDS_CLASS && X86 && SERIO_I8042 && DMI + depends on X86 && SERIO_I8042 && DMI help This driver makes the mail LED accessible from userspace programs through the leds subsystem. This LED have three @@ -196,7 +198,7 @@ config LEDS_CLEVO_MAIL config LEDS_PCA955X tristate "LED Support for PCA955x I2C chips" - depends on LEDS_CLASS && I2C + depends on I2C help This option enables support for LEDs connected to PCA955x LED driver chips accessed via the I2C bus. Supported @@ -204,54 +206,54 @@ config LEDS_PCA955X config LEDS_WM831X_STATUS tristate "LED support for status LEDs on WM831x PMICs" - depends on LEDS_CLASS && MFD_WM831X + depends on MFD_WM831X help This option enables support for the status LEDs of the WM831x series of PMICs. config LEDS_WM8350 tristate "LED Support for WM8350 AudioPlus PMIC" - depends on LEDS_CLASS && MFD_WM8350 + depends on MFD_WM8350 help This option enables support for LEDs driven by the Wolfson Microelectronics WM8350 AudioPlus PMIC. config LEDS_DA903X tristate "LED Support for DA9030/DA9034 PMIC" - depends on LEDS_CLASS && PMIC_DA903X + depends on PMIC_DA903X help This option enables support for on-chip LED drivers found on Dialog Semiconductor DA9030/DA9034 PMICs. 
config LEDS_DAC124S085 tristate "LED Support for DAC124S085 SPI DAC" - depends on LEDS_CLASS && SPI + depends on SPI help This option enables support for DAC124S085 SPI DAC from NatSemi, which can be used to control up to four LEDs. config LEDS_PWM tristate "PWM driven LED Support" - depends on LEDS_CLASS && HAVE_PWM + depends on HAVE_PWM help This option enables support for pwm driven LEDs config LEDS_REGULATOR tristate "REGULATOR driven LED support" - depends on LEDS_CLASS && REGULATOR + depends on REGULATOR help This option enables support for regulator driven LEDs. config LEDS_BD2802 tristate "LED driver for BD2802 RGB LED" - depends on LEDS_CLASS && I2C + depends on I2C help This option enables support for BD2802GU RGB LED driver chips accessed via the I2C bus. config LEDS_INTEL_SS4200 tristate "LED driver for Intel NAS SS4200 series" - depends on LEDS_CLASS && PCI && DMI + depends on PCI && DMI help This option enables support for the Intel SS4200 series of Network Attached Storage servers. You may control the hard @@ -260,7 +262,7 @@ config LEDS_INTEL_SS4200 config LEDS_LT3593 tristate "LED driver for LT3593 controllers" - depends on LEDS_CLASS && GENERIC_GPIO + depends on GENERIC_GPIO help This option enables support for LEDs driven by a Linear Technology LT3593 controller. This controller uses a special one-wire pulse @@ -268,7 +270,7 @@ config LEDS_LT3593 config LEDS_ADP5520 tristate "LED Support for ADP5520/ADP5501 PMIC" - depends on LEDS_CLASS && PMIC_ADP5520 + depends on PMIC_ADP5520 help This option enables support for on-chip LED drivers found on Analog Devices ADP5520/ADP5501 PMICs. @@ -276,8 +278,6 @@ config LEDS_ADP5520 To compile this driver as a module, choose M here: the module will be called leds-adp5520. -comment "LED Triggers" - config LEDS_TRIGGERS bool "LED Trigger support" help @@ -285,9 +285,12 @@ config LEDS_TRIGGERS These triggers allow kernel events to drive the LEDs and can be configured via sysfs. If unsure, say Y. +if LEDS_TRIGGERS + +comment "LED Triggers" + config LEDS_TRIGGER_TIMER tristate "LED Timer Trigger" - depends on LEDS_TRIGGERS help This allows LEDs to be controlled by a programmable timer via sysfs. Some LED hardware can be programmed to start @@ -298,14 +301,13 @@ config LEDS_TRIGGER_TIMER config LEDS_TRIGGER_IDE_DISK bool "LED IDE Disk Trigger" - depends on LEDS_TRIGGERS && IDE_GD_ATA + depends on IDE_GD_ATA help This allows LEDs to be controlled by IDE disk activity. If unsure, say Y. config LEDS_TRIGGER_HEARTBEAT tristate "LED Heartbeat Trigger" - depends on LEDS_TRIGGERS help This allows LEDs to be controlled by a CPU load average. The flash frequency is a hyperbolic function of the 1-minute @@ -314,7 +316,6 @@ config LEDS_TRIGGER_HEARTBEAT config LEDS_TRIGGER_BACKLIGHT tristate "LED backlight Trigger" - depends on LEDS_TRIGGERS help This allows LEDs to be controlled as a backlight device: they turn off and on when the display is blanked and unblanked. @@ -323,7 +324,6 @@ config LEDS_TRIGGER_BACKLIGHT config LEDS_TRIGGER_GPIO tristate "LED GPIO Trigger" - depends on LEDS_TRIGGERS depends on GPIOLIB help This allows LEDs to be controlled by gpio events. It's good @@ -336,7 +336,6 @@ config LEDS_TRIGGER_GPIO config LEDS_TRIGGER_DEFAULT_ON tristate "LED Default ON Trigger" - depends on LEDS_TRIGGERS help This allows LEDs to be initialised in the ON state. If unsure, say Y. 
@@ -344,4 +343,8 @@ config LEDS_TRIGGER_DEFAULT_ON comment "iptables trigger is under Netfilter config (LED target)" depends on LEDS_TRIGGERS +endif # LEDS_TRIGGERS + +endif # LEDS_CLASS + endif # NEW_LEDS -- cgit v1.2.3 From 72dcd8d08aca4ac6154dc37243880ee306c7ea73 Mon Sep 17 00:00:00 2001 From: Bob Rodgers Date: Wed, 17 Feb 2010 15:23:31 -0600 Subject: leds: Add Dell Business Class Netbook LED driver This patch adds an LED driver to support the Dell Activity LED on the Dell Latitude 2100 netbook and future products to come. The Activity LED is visible externally in the lid so classroom instructors can observe it from a distance. The driver uses the sysfs led_class and provides a standard LED interface. Signed-off by: Bob Rodgers Signed-off-by: Louis Davis Signed-off-by: Jim Dailey , Developers Acked-by: Matthew Garrett Acked-by: Dmitry Torokhov Signed-off-by: Richard Purdie --- drivers/leds/Kconfig | 7 ++ drivers/leds/Makefile | 1 + drivers/leds/dell-led.c | 200 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 208 insertions(+) create mode 100644 drivers/leds/dell-led.c diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index d86e1a3ee0b7..505eb64c329c 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -278,6 +278,13 @@ config LEDS_ADP5520 To compile this driver as a module, choose M here: the module will be called leds-adp5520. +config LEDS_DELL_NETBOOKS + tristate "External LED on Dell Business Netbooks" + depends on X86 && ACPI_WMI + help + This adds support for the Latitude 2100 and similar + notebooks that have an external LED. + config LEDS_TRIGGERS bool "LED Trigger support" help diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index d76fb32b77c0..0cd8b9957380 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o +obj-$(CONFIG_LEDS_DELL_NETBOOKS) += dell-led.o # LED SPI Drivers obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o diff --git a/drivers/leds/dell-led.c b/drivers/leds/dell-led.c new file mode 100644 index 000000000000..ee310891fff8 --- /dev/null +++ b/drivers/leds/dell-led.c @@ -0,0 +1,200 @@ +/* + * dell_led.c - Dell LED Driver + * + * Copyright (C) 2010 Dell Inc. + * Louis Davis + * Jim Dailey + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + */ + +#include +#include + +MODULE_AUTHOR("Louis Davis/Jim Dailey"); +MODULE_DESCRIPTION("Dell LED Control Driver"); +MODULE_LICENSE("GPL"); + +#define DELL_LED_BIOS_GUID "F6E4FE6E-909D-47cb-8BAB-C9F6F2F8D396" +MODULE_ALIAS("wmi:" DELL_LED_BIOS_GUID); + +/* Error Result Codes: */ +#define INVALID_DEVICE_ID 250 +#define INVALID_PARAMETER 251 +#define INVALID_BUFFER 252 +#define INTERFACE_ERROR 253 +#define UNSUPPORTED_COMMAND 254 +#define UNSPECIFIED_ERROR 255 + +/* Device ID */ +#define DEVICE_ID_PANEL_BACK 1 + +/* LED Commands */ +#define CMD_LED_ON 16 +#define CMD_LED_OFF 17 +#define CMD_LED_BLINK 18 + +struct bios_args { + unsigned char length; + unsigned char result_code; + unsigned char device_id; + unsigned char command; + unsigned char on_time; + unsigned char off_time; +}; + +static int dell_led_perform_fn(u8 length, + u8 result_code, + u8 device_id, + u8 command, + u8 on_time, + u8 off_time) +{ + struct bios_args *bios_return; + u8 return_code; + union acpi_object *obj; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_buffer input; + acpi_status status; + + struct bios_args args; + args.length = length; + args.result_code = result_code; + args.device_id = device_id; + args.command = command; + args.on_time = on_time; + args.off_time = off_time; + + input.length = sizeof(struct bios_args); + input.pointer = &args; + + status = wmi_evaluate_method(DELL_LED_BIOS_GUID, + 1, + 1, + &input, + &output); + + if (ACPI_FAILURE(status)) + return status; + + obj = output.pointer; + + if (!obj) + return -EINVAL; + else if (obj->type != ACPI_TYPE_BUFFER) { + kfree(obj); + return -EINVAL; + } + + bios_return = ((struct bios_args *)obj->buffer.pointer); + return_code = bios_return->result_code; + + kfree(obj); + + return return_code; +} + +static int led_on(void) +{ + return dell_led_perform_fn(3, /* Length of command */ + INTERFACE_ERROR, /* Init to INTERFACE_ERROR */ + DEVICE_ID_PANEL_BACK, /* Device ID */ + CMD_LED_ON, /* Command */ + 0, /* not used */ + 0); /* not used */ +} + +static int led_off(void) +{ + return dell_led_perform_fn(3, /* Length of command */ + INTERFACE_ERROR, /* Init to INTERFACE_ERROR */ + DEVICE_ID_PANEL_BACK, /* Device ID */ + CMD_LED_OFF, /* Command */ + 0, /* not used */ + 0); /* not used */ +} + +static int led_blink(unsigned char on_eighths, + unsigned char off_eighths) +{ + return dell_led_perform_fn(5, /* Length of command */ + INTERFACE_ERROR, /* Init to INTERFACE_ERROR */ + DEVICE_ID_PANEL_BACK, /* Device ID */ + CMD_LED_BLINK, /* Command */ + on_eighths, /* blink on in eigths of a second */ + off_eighths); /* blink off in eights of a second */ +} + +static void dell_led_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + if (value == LED_OFF) + led_off(); + else + led_on(); +} + +static int dell_led_blink(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + unsigned long on_eighths; + unsigned long off_eighths; + + /* The Dell LED delay is based on 125ms intervals. + Need to round up to next interval. 
*/ + + on_eighths = (*delay_on + 124) / 125; + if (0 == on_eighths) + on_eighths = 1; + if (on_eighths > 255) + on_eighths = 255; + *delay_on = on_eighths * 125; + + off_eighths = (*delay_off + 124) / 125; + if (0 == off_eighths) + off_eighths = 1; + if (off_eighths > 255) + off_eighths = 255; + *delay_off = off_eighths * 125; + + led_blink(on_eighths, off_eighths); + + return 0; +} + +static struct led_classdev dell_led = { + .name = "dell::lid", + .brightness = LED_OFF, + .max_brightness = 1, + .brightness_set = dell_led_set, + .blink_set = dell_led_blink, + .flags = LED_CORE_SUSPENDRESUME, +}; + +static int __init dell_led_init(void) +{ + int error = 0; + + if (!wmi_has_guid(DELL_LED_BIOS_GUID)) + return -ENODEV; + + error = led_off(); + if (error != 0) + return -ENODEV; + + return led_classdev_register(NULL, &dell_led); +} + +static void __exit dell_led_exit(void) +{ + led_classdev_unregister(&dell_led); + + led_off(); +} + +module_init(dell_led_init); +module_exit(dell_led_exit); -- cgit v1.2.3 From 0493a4ff10959ff4c8e0d65efee25b7ffd4fa5db Mon Sep 17 00:00:00 2001 From: Anton Vorontsov Date: Thu, 11 Mar 2010 13:58:47 -0800 Subject: leds-gpio: fix default state handling on OF platforms The driver wrongly sets default state for LEDs that don't specify default-state property. Currently the driver handles default state this way: memset(&led, 0, sizeof(led)); for_each_child_of_node(np, child) { state = of_get_property(child, "default-state", NULL); if (state) { if (!strcmp(state, "keep")) led.default_state = LEDS_GPIO_DEFSTATE_KEEP; ... } ret = create_gpio_led(&led, ...); } Which means that all LEDs that do not specify default-state will inherit the last value of the default-state property, which is wrong. This patch fixes the issue by moving LED's template initialization into the loop body. Signed-off-by: Anton Vorontsov Signed-off-by: Andrew Morton Signed-off-by: Richard Purdie --- drivers/leds/leds-gpio.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index e5225d28f392..0823e2622e8c 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -211,7 +211,6 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev, const struct of_device_id *match) { struct device_node *np = ofdev->node, *child; - struct gpio_led led; struct gpio_led_of_platform_data *pdata; int count = 0, ret; @@ -226,8 +225,8 @@ static int __devinit of_gpio_leds_probe(struct of_device *ofdev, if (!pdata) return -ENOMEM; - memset(&led, 0, sizeof(led)); for_each_child_of_node(np, child) { + struct gpio_led led = {}; enum of_gpio_flags flags; const char *state; -- cgit v1.2.3 From 36bc5ee6a8d13333980fa54e97d3469d3d4cda98 Mon Sep 17 00:00:00 2001 From: Evan McClain Date: Tue, 9 Mar 2010 19:20:58 -0500 Subject: backlight: mbp_nvidia_bl - add five more MacBook variants This adds the MacBook 1,1 2,1 3,1 4,1 and 4,2 to the DMI tables. 
Signed-off-by: Evan McClain Signed-off-by: Richard Purdie --- drivers/video/backlight/mbp_nvidia_bl.c | 45 +++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c index 0881358eeace..1b5d3fe6bbbc 100644 --- a/drivers/video/backlight/mbp_nvidia_bl.c +++ b/drivers/video/backlight/mbp_nvidia_bl.c @@ -137,6 +137,51 @@ static int mbp_dmi_match(const struct dmi_system_id *id) } static const struct dmi_system_id __initdata mbp_device_table[] = { + { + .callback = mbp_dmi_match, + .ident = "MacBook 1,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"), + }, + .driver_data = (void *)&intel_chipset_data, + }, + { + .callback = mbp_dmi_match, + .ident = "MacBook 2,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"), + }, + .driver_data = (void *)&intel_chipset_data, + }, + { + .callback = mbp_dmi_match, + .ident = "MacBook 3,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook3,1"), + }, + .driver_data = (void *)&intel_chipset_data, + }, + { + .callback = mbp_dmi_match, + .ident = "MacBook 4,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,1"), + }, + .driver_data = (void *)&intel_chipset_data, + }, + { + .callback = mbp_dmi_match, + .ident = "MacBook 4,2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,2"), + }, + .driver_data = (void *)&intel_chipset_data, + }, { .callback = mbp_dmi_match, .ident = "MacBookPro 3,1", -- cgit v1.2.3 From f0af78991363d704694a3618b638662c97d8a110 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Pr=C3=A9mont?= Date: Fri, 26 Feb 2010 12:59:39 +0100 Subject: backlight: classmate-laptop - Fix missing registration failure handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Check newly registered backlight_device for error and properly return error to parent. Mark struct backlight_ops as const. Signed-off-by: Bruno Prémont Signed-off-by: Richard Purdie --- drivers/platform/x86/classmate-laptop.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c index 6670ed8f9e5b..c696cf1c2616 100644 --- a/drivers/platform/x86/classmate-laptop.c +++ b/drivers/platform/x86/classmate-laptop.c @@ -455,7 +455,7 @@ static int cmpc_bl_update_status(struct backlight_device *bd) return -1; } -static struct backlight_ops cmpc_bl_ops = { +static const struct backlight_ops cmpc_bl_ops = { .get_brightness = cmpc_bl_get_brightness, .update_status = cmpc_bl_update_status }; @@ -469,6 +469,8 @@ static int cmpc_bl_add(struct acpi_device *acpi) props.max_brightness = 7; bd = backlight_device_register("cmpc_bl", &acpi->dev, acpi->handle, &cmpc_bl_ops, &props); + if (IS_ERR(bd)) + return PTR_ERR(bd); dev_set_drvdata(&acpi->dev, bd); return 0; } -- cgit v1.2.3 From fa11de0a33e214a00e205494c27fb5a7bb71a5fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Pr=C3=A9mont?= Date: Fri, 26 Feb 2010 13:04:54 +0100 Subject: backlight: blackfin - Fix missing registration failure handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Check newly registered backlight_device for error and properly return error to parent Mark struct backlight_ops as const. 
Signed-off-by: Bruno Prémont Acked-by: Mike Frysinger (constify struct backlight_ops) Signed-off-by: Richard Purdie --- drivers/video/bf54x-lq043fb.c | 10 +++++++++- drivers/video/bfin-t350mcqb-fb.c | 10 +++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c index 54df3d44af8f..23b2a8c0dbfc 100644 --- a/drivers/video/bf54x-lq043fb.c +++ b/drivers/video/bf54x-lq043fb.c @@ -433,7 +433,7 @@ static int bl_get_brightness(struct backlight_device *bd) return 0; } -static struct backlight_ops bfin_lq043fb_bl_ops = { +static const struct backlight_ops bfin_lq043fb_bl_ops = { .get_brightness = bl_get_brightness, }; @@ -650,6 +650,12 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev) props.max_brightness = 255; bl_dev = backlight_device_register("bf54x-bl", NULL, NULL, &bfin_lq043fb_bl_ops, &props); + if (IS_ERR(bl_dev)) { + printk(KERN_ERR DRIVER_NAME + ": unable to register backlight.\n"); + ret = -EINVAL; + goto out9; + } lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops); lcd_dev->props.max_contrast = 255, printk(KERN_INFO "Done.\n"); @@ -657,6 +663,8 @@ static int __devinit bfin_bf54x_probe(struct platform_device *pdev) return 0; +out9: + unregister_framebuffer(fbinfo); out8: free_irq(info->irq, info); out7: diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c index 3a8e811a7e9f..31a2dec927bb 100644 --- a/drivers/video/bfin-t350mcqb-fb.c +++ b/drivers/video/bfin-t350mcqb-fb.c @@ -352,7 +352,7 @@ static int bl_get_brightness(struct backlight_device *bd) return 0; } -static struct backlight_ops bfin_lq043fb_bl_ops = { +static const struct backlight_ops bfin_lq043fb_bl_ops = { .get_brightness = bl_get_brightness, }; @@ -545,6 +545,12 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) props.max_brightness = 255; bl_dev = backlight_device_register("bf52x-bl", NULL, NULL, &bfin_lq043fb_bl_ops, &props); + if (IS_ERR(bl_dev)) { + printk(KERN_ERR DRIVER_NAME + ": unable to register backlight.\n"); + ret = -EINVAL; + goto out9; + } lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops); lcd_dev->props.max_contrast = 255, printk(KERN_INFO "Done.\n"); @@ -552,6 +558,8 @@ static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev) return 0; +out9: + unregister_framebuffer(fbinfo); out8: free_irq(info->irq, info); out7: -- cgit v1.2.3 From 28d85873cd6d8d3176e30e02b941b1329df1024c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Pr=C3=A9mont?= Date: Fri, 26 Feb 2010 13:17:16 +0100 Subject: backlight: msi-laptop, msi-wmi: fix incomplete registration failure handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Properly return backlight registration error to parent. Mark struct backlight_ops as const. 
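For illustration, the error-handling pattern these registration fixes converge on looks roughly like the sketch below (a hypothetical probe function; example_bl_ops and the brightness maximum are assumptions, not code from any driver in this series):

	static int example_bl_probe(struct platform_device *pdev)
	{
		struct backlight_properties props;
		struct backlight_device *bd;

		memset(&props, 0, sizeof(props));
		props.max_brightness = 255;	/* assumed hardware maximum */

		bd = backlight_device_register("example-bl", &pdev->dev, NULL,
					       &example_bl_ops, &props);
		if (IS_ERR(bd))
			return PTR_ERR(bd);	/* propagate the real errno */

		platform_set_drvdata(pdev, bd);
		return 0;
	}
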
Signed-off-by: Bruno Prémont Reviewed-by: Anisse Astier Signed-off-by: Richard Purdie --- drivers/platform/x86/msi-wmi.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c index fb7ccaae6563..367caaae2f3c 100644 --- a/drivers/platform/x86/msi-wmi.c +++ b/drivers/platform/x86/msi-wmi.c @@ -138,7 +138,7 @@ static int bl_set_status(struct backlight_device *bd) return msi_wmi_set_block(0, backlight_map[bright]); } -static struct backlight_ops msi_backlight_ops = { +static const struct backlight_ops msi_backlight_ops = { .get_brightness = bl_get, .update_status = bl_set_status, }; @@ -255,8 +255,10 @@ static int __init msi_wmi_init(void) backlight = backlight_device_register(DRV_NAME, NULL, NULL, &msi_backlight_ops, &props); - if (IS_ERR(backlight)) + if (IS_ERR(backlight)) { + err = PTR_ERR(backlight); goto err_free_input; + } err = bl_get(NULL); if (err < 0) -- cgit v1.2.3 From ec57af9c2ece22ae6234189972105d777ff5f939 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Pr=C3=A9mont?= Date: Fri, 26 Feb 2010 13:20:10 +0100 Subject: backlight: panasonic-laptop - Fix incomplete registration failure handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Properly return backlight registration error to parent. Mark struct backlight_ops as const. Signed-off-by: Bruno Prémont Acked-by: Harald Welte (registration failure) Signed-off-by: Richard Purdie --- drivers/platform/x86/panasonic-laptop.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index ab5c9cea1462..726f02affcb6 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c @@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd) return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright); } -static struct backlight_ops pcc_backlight_ops = { +static const struct backlight_ops pcc_backlight_ops = { .get_brightness = bl_get, .update_status = bl_set_status, }; @@ -651,8 +651,10 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT]; pcc->backlight = backlight_device_register("panasonic", NULL, pcc, &pcc_backlight_ops, &props); - if (IS_ERR(pcc->backlight)) + if (IS_ERR(pcc->backlight)) { + result = PTR_ERR(pcc->backlight); goto out_sinf; + } /* read the initial brightness setting from the hardware */ pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT]; -- cgit v1.2.3 From 14b5d6dd40b3091cb5f566568baa4a74dc619286 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 10 Mar 2010 18:32:18 +0100 Subject: leds: Fix race between LED device uevent and actual attributes creation If we were to dynamically register/unregister leds and have udev or other daemons handle the leds class uevents, we would be notified of the adding of a new LED and if the daemon immediately tries to open one of the attributes of the led device, it would fail with a "no such file or directory" error since this the attributes are not yet created. Fix this by switching attributes to be class-wide, such that the driver core will register these attributes with device_add_attrs and then emit the kobject_uevent ADD signal. 
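As a rough sketch of the mechanism (a hypothetical "foo" class, not the actual led-class code in the diff below), class-wide attributes take this general shape; because they are attached to the class itself, device_add() creates the sysfs files before emitting the ADD uevent, so udev never sees a device without its attributes:

	static struct class *foo_class;

	/* foo_value_show/foo_value_store are assumed to exist */
	static struct device_attribute foo_class_attrs[] = {
		__ATTR(value, 0644, foo_value_show, foo_value_store),
		__ATTR_NULL,
	};

	static int __init foo_init(void)
	{
		foo_class = class_create(THIS_MODULE, "foo");
		if (IS_ERR(foo_class))
			return PTR_ERR(foo_class);
		/* one table for the class; the core instantiates the files
		 * for every device before the ADD uevent goes out */
		foo_class->dev_attrs = foo_class_attrs;
		return 0;
	}
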
Signed-off-by: Fainelli Signed-off-by: Richard Purdie --- drivers/leds/led-class.c | 40 ++++++++-------------------------------- 1 file changed, 8 insertions(+), 32 deletions(-) diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 349e07350144..69e7d86a5143 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -72,11 +72,14 @@ static ssize_t led_max_brightness_show(struct device *dev, return sprintf(buf, "%u\n", led_cdev->max_brightness); } -static DEVICE_ATTR(brightness, 0644, led_brightness_show, led_brightness_store); -static DEVICE_ATTR(max_brightness, 0444, led_max_brightness_show, NULL); +static struct device_attribute led_class_attrs[] = { + __ATTR(brightness, 0644, led_brightness_show, led_brightness_store), + __ATTR(max_brightness, 0644, led_max_brightness_show, NULL), #ifdef CONFIG_LEDS_TRIGGERS -static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store); + __ATTR(trigger, 0644, led_trigger_show, led_trigger_store), #endif + __ATTR_NULL, +}; /** * led_classdev_suspend - suspend an led_classdev. @@ -127,18 +130,11 @@ static int led_resume(struct device *dev) */ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) { - int rc; - led_cdev->dev = device_create(leds_class, parent, 0, led_cdev, "%s", led_cdev->name); if (IS_ERR(led_cdev->dev)) return PTR_ERR(led_cdev->dev); - /* register the attributes */ - rc = device_create_file(led_cdev->dev, &dev_attr_brightness); - if (rc) - goto err_out; - #ifdef CONFIG_LEDS_TRIGGERS init_rwsem(&led_cdev->trigger_lock); #endif @@ -150,17 +146,9 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) if (!led_cdev->max_brightness) led_cdev->max_brightness = LED_FULL; - rc = device_create_file(led_cdev->dev, &dev_attr_max_brightness); - if (rc) - goto err_out_attr_max; - led_update_brightness(led_cdev); #ifdef CONFIG_LEDS_TRIGGERS - rc = device_create_file(led_cdev->dev, &dev_attr_trigger); - if (rc) - goto err_out_led_list; - led_trigger_set_default(led_cdev); #endif @@ -168,18 +156,8 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) led_cdev->name); return 0; - -#ifdef CONFIG_LEDS_TRIGGERS -err_out_led_list: - device_remove_file(led_cdev->dev, &dev_attr_max_brightness); -#endif -err_out_attr_max: - device_remove_file(led_cdev->dev, &dev_attr_brightness); - list_del(&led_cdev->node); -err_out: - device_unregister(led_cdev->dev); - return rc; } + EXPORT_SYMBOL_GPL(led_classdev_register); /** @@ -190,10 +168,7 @@ EXPORT_SYMBOL_GPL(led_classdev_register); */ void led_classdev_unregister(struct led_classdev *led_cdev) { - device_remove_file(led_cdev->dev, &dev_attr_max_brightness); - device_remove_file(led_cdev->dev, &dev_attr_brightness); #ifdef CONFIG_LEDS_TRIGGERS - device_remove_file(led_cdev->dev, &dev_attr_trigger); down_write(&led_cdev->trigger_lock); if (led_cdev->trigger) led_trigger_set(led_cdev, NULL); @@ -215,6 +190,7 @@ static int __init leds_init(void) return PTR_ERR(leds_class); leds_class->suspend = led_suspend; leds_class->resume = led_resume; + leds_class->dev_attrs = led_class_attrs; return 0; } -- cgit v1.2.3 From cd9640a70d542ca026a812ac34733799da0a39c9 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 16 Mar 2010 18:55:54 +0000 Subject: xfs: remove old vmap cache Re-apply a commit that had been reverted due to regressions that have since been fixed. 
Original commit: d2859751cd0bf586941ffa7308635a293f943c17 Author: Nick Piggin Date: Tue, 6 Jan 2009 14:40:44 +1100 XFS's vmap batching simply defers a number (up to 64) of vunmaps, and keeps track of them in a list. To purge the batch, it just goes through the list and calls vunamp on each one. This is pretty poor: a global TLB flush is generally still performed on each vunmap, with the most expensive parts of the operation being the broadcast IPIs and locking involved in the SMP callouts, and the locking involved in the vmap management -- none of these are avoided by just batching up the calls. I'm actually surprised it ever made much difference. (Now that the lazy vmap allocator is upstream, this description is not quite right, but the vunmap batching still doesn't seem to do much). Rip all this logic out of XFS completely. I will improve vmap performance and scalability directly in subsequent patch. Signed-off-by: Nick Piggin Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy The only change I made was to use the "new" xfs_buf_is_vmapped() function in a place it had been open-coded in the original. Modified-by: Alex Elder Signed-off-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_buf.c | 76 +--------------------------------------------- 1 file changed, 1 insertion(+), 75 deletions(-) diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 6f76ba85f193..81f4ef27de3e 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -167,75 +167,6 @@ test_page_region( return (mask && (page_private(page) & mask) == mask); } -/* - * Mapping of multi-page buffers into contiguous virtual space - */ - -typedef struct a_list { - void *vm_addr; - struct a_list *next; -} a_list_t; - -static a_list_t *as_free_head; -static int as_list_len; -static DEFINE_SPINLOCK(as_lock); - -/* - * Try to batch vunmaps because they are costly. - */ -STATIC void -free_address( - void *addr) -{ - a_list_t *aentry; - -#ifdef CONFIG_XEN - /* - * Xen needs to be able to make sure it can get an exclusive - * RO mapping of pages it wants to turn into a pagetable. If - * a newly allocated page is also still being vmap()ed by xfs, - * it will cause pagetable construction to fail. This is a - * quick workaround to always eagerly unmap pages so that Xen - * is happy. 
- */ - vunmap(addr); - return; -#endif - - aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT); - if (likely(aentry)) { - spin_lock(&as_lock); - aentry->next = as_free_head; - aentry->vm_addr = addr; - as_free_head = aentry; - as_list_len++; - spin_unlock(&as_lock); - } else { - vunmap(addr); - } -} - -STATIC void -purge_addresses(void) -{ - a_list_t *aentry, *old; - - if (as_free_head == NULL) - return; - - spin_lock(&as_lock); - aentry = as_free_head; - as_free_head = NULL; - as_list_len = 0; - spin_unlock(&as_lock); - - while ((old = aentry) != NULL) { - vunmap(aentry->vm_addr); - aentry = aentry->next; - kfree(old); - } -} - /* * Internal xfs_buf_t object manipulation */ @@ -337,7 +268,7 @@ xfs_buf_free( uint i; if (xfs_buf_is_vmapped(bp)) - free_address(bp->b_addr - bp->b_offset); + vunmap(bp->b_addr - bp->b_offset); for (i = 0; i < bp->b_page_count; i++) { struct page *page = bp->b_pages[i]; @@ -457,8 +388,6 @@ _xfs_buf_map_pages( bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; bp->b_flags |= XBF_MAPPED; } else if (flags & XBF_MAPPED) { - if (as_list_len > 64) - purge_addresses(); bp->b_addr = vmap(bp->b_pages, bp->b_page_count, VM_MAP, PAGE_KERNEL); if (unlikely(bp->b_addr == NULL)) @@ -1955,9 +1884,6 @@ xfsbufd( xfs_buf_iostrategy(bp); count++; } - - if (as_list_len > 0) - purge_addresses(); if (count) blk_run_address_space(target->bt_mapping); -- cgit v1.2.3 From 8a262e573d30187b32b5534ec489446931239cc5 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 16 Mar 2010 18:55:56 +0000 Subject: xfs: use scalable vmap API Re-apply a commit that had been reverted due to regressions that have since been fixed. From 95f8e302c04c0b0c6de35ab399a5551605eeb006 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Tue, 6 Jan 2009 14:43:09 +1100 Implement XFS's large buffer support with the new vmap APIs. See the vmap rewrite (db64fe02) for some numbers. The biggest improvement that comes from using the new APIs is avoiding the global KVA allocation lock on every call. Signed-off-by: Nick Piggin Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Only modifications here were a minor reformat, plus making the patch apply given the new use of xfs_buf_is_vmapped(). Modified-by: Alex Elder Signed-off-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_buf.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 81f4ef27de3e..bd111b7e1daa 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -268,7 +268,8 @@ xfs_buf_free( uint i; if (xfs_buf_is_vmapped(bp)) - vunmap(bp->b_addr - bp->b_offset); + vm_unmap_ram(bp->b_addr - bp->b_offset, + bp->b_page_count); for (i = 0; i < bp->b_page_count; i++) { struct page *page = bp->b_pages[i]; @@ -388,8 +389,8 @@ _xfs_buf_map_pages( bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; bp->b_flags |= XBF_MAPPED; } else if (flags & XBF_MAPPED) { - bp->b_addr = vmap(bp->b_pages, bp->b_page_count, - VM_MAP, PAGE_KERNEL); + bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, + -1, PAGE_KERNEL); if (unlikely(bp->b_addr == NULL)) return -ENOMEM; bp->b_addr += bp->b_offset; -- cgit v1.2.3 From e8c3753ce4cd6a805ebcfdb3aa6d30e6f4b8b3e0 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 15 Mar 2010 02:36:35 +0000 Subject: xfs: don't warn about page discards on shutdown If we are doing a forced shutdown, we can get lots of noise about delalloc pages being discarded. 
This happens by design during a forced shutdown, so don't spam the logs with these messages. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_aops.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 9083357f9e44..99628508cb11 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -932,6 +932,9 @@ xfs_aops_discard_page( if (!xfs_is_delayed_page(page, IOMAP_DELAY)) goto out_invalidate; + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + goto out_invalidate; + xfs_fs_cmn_err(CE_ALERT, ip->i_mount, "page discard on page %p, inode 0x%llx, offset %llu.", page, ip->i_ino, offset); @@ -964,8 +967,10 @@ xfs_aops_discard_page( if (error) { /* something screwed, just bail */ - xfs_fs_cmn_err(CE_ALERT, ip->i_mount, - "page discard failed delalloc mapping lookup."); + if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { + xfs_fs_cmn_err(CE_ALERT, ip->i_mount, + "page discard failed delalloc mapping lookup."); + } break; } if (!nimaps) { @@ -991,8 +996,10 @@ xfs_aops_discard_page( ASSERT(!flist.xbf_count && !flist.xbf_first); if (error) { /* something screwed, just bail */ - xfs_fs_cmn_err(CE_ALERT, ip->i_mount, + if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { + xfs_fs_cmn_err(CE_ALERT, ip->i_mount, + "page discard unable to remove delalloc mapping."); + } break; } next_buffer: -- cgit v1.2.3 From f04e879bf296d136bcafd8c5a26e95599b141671 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 16 Mar 2010 14:40:42 -0700 Subject: sunxvr1000: Add missing FB=y dependency. Signed-off-by: David S. Miller --- drivers/video/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index a5755b88de2a..f15fb024e806 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -911,7 +911,7 @@ config FB_XVR2500 config FB_XVR1000 bool "Sun XVR-1000 support" - depends on SPARC64 + depends on (FB = y) && SPARC64 select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT -- cgit v1.2.3 From d6ccb1f55ddf5146219707c0e71b85e3a52179b4 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Wed, 10 Mar 2010 23:33:25 -0600 Subject: powerpc/85xx: Make sure lwarx hint isn't set on ppc32 e500v1/v2 based chips will treat any reserved field being set in an opcode as illegal. Thus always setting the hint in the opcode is a bad idea. Anton should be kept away from the powerpc opcode map. Signed-off-by: Kumar Gala --- arch/powerpc/include/asm/ppc-opcode.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index aea714797590..d553bbeb726c 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -25,7 +25,7 @@ #define PPC_INST_LDARX 0x7c0000a8 #define PPC_INST_LSWI 0x7c0004aa #define PPC_INST_LSWX 0x7c00042a -#define PPC_INST_LWARX 0x7c000029 +#define PPC_INST_LWARX 0x7c000028 #define PPC_INST_LWSYNC 0x7c2004ac #define PPC_INST_LXVD2X 0x7c000698 #define PPC_INST_MCRXR 0x7c000400 @@ -62,8 +62,8 @@ #define __PPC_T_TLB(t) (((t) & 0x3) << 21) #define __PPC_WC(w) (((w) & 0x3) << 21) /* - * Only use the larx hint bit on 64bit CPUs. Once we verify it doesn't have - * any side effects on all 32bit processors, we can do this all the time. + * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a + * larx with EH set as an illegal instruction.
*/ #ifdef CONFIG_PPC64 #define __PPC_EH(eh) (((eh) & 0x1) << 0) -- cgit v1.2.3 From 9d296cfa69b3d4abc9d556986d544f0727988eed Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Tue, 16 Mar 2010 23:39:56 -0500 Subject: powerpc/fsl-booke: Get coherent bit from PTE We shouldn't be always setting 'M' in the TLB entry since its reasonable for somethings to be mapped non-coherent. The PTE should have 'M' set properly. Signed-off-by: Kumar Gala --- arch/powerpc/kernel/head_fsl_booke.S | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 25793bb0e782..a7cf4934342c 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -746,9 +746,6 @@ finish_tlb_load: rlwimi r12, r11, 32-19, 27, 31 /* extract WIMGE from pte */ #else rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ -#endif -#ifdef CONFIG_SMP - ori r12, r12, MAS2_M #endif mtspr SPRN_MAS2, r12 -- cgit v1.2.3 From dcd5c1662db59a6b82942f47fb6ac9dd63f6d3dd Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Tue, 16 Mar 2010 01:05:02 +0100 Subject: perf: Fix unexported generic perf_arch_fetch_caller_regs perf_arch_fetch_caller_regs() is exported for the overriden x86 version, but not for the generic weak version. As a general rule, weak functions should not have their symbol exported in the same file they are defined. So let's export it on trace_event_perf.c as it is used by trace events only. This fixes: ERROR: ".perf_arch_fetch_caller_regs" [fs/xfs/xfs.ko] undefined! ERROR: ".perf_arch_fetch_caller_regs" [arch/powerpc/platforms/cell/spufs/spufs.ko] undefined! -v2: And also only build it if trace events are enabled. -v3: Fix changelog mistake Reported-by: Stephen Rothwell Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Xiao Guangrong Cc: Paul Mackerras LKML-Reference: <1268697902-9518-1-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 3 ++- kernel/perf_event.c | 2 ++ kernel/trace/trace_event_perf.c | 2 ++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 7645faea8e85..60398a0d947c 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1702,6 +1702,7 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return entry; } +#ifdef CONFIG_EVENT_TRACING void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) { regs->ip = ip; @@ -1713,4 +1714,4 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski regs->cs = __KERNEL_CS; local_save_flags(regs->flags); } -EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); +#endif diff --git a/kernel/perf_event.c b/kernel/perf_event.c index fb3031cf9f17..574ee58a3046 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -2786,10 +2786,12 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) return NULL; } +#ifdef CONFIG_EVENT_TRACING __weak void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) { } +#endif /* * Output diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 0709e4f75114..7d79a10c3cde 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -12,6 +12,8 @@ DEFINE_PER_CPU(struct pt_regs, perf_trace_regs); EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs); +EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); + static char 
*perf_trace_buf; static char *perf_trace_buf_nmi; -- cgit v1.2.3 From 603037c3d1a42d5013f035355a2c60b0006a9fdf Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 11 Mar 2010 11:37:16 +0900 Subject: ahci: add missing nv IDs bko#15481 shows that we're missing some NVIDIA ahci PCI IDs. Peer Chen confirms that IDs 0x580-0x58f are reserved for cases where Linux ID option is selected in the BIOS and are only used for mcp65-73. Add 0x0581-0x058f. http://bugzilla.kernel.org/show_bug.cgi?id=15481 Signed-off-by: Tejun Heo Cc: Peer Chen Signed-off-by: Jeff Garzik --- drivers/ata/ahci.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 6bd930b93bcc..9e5b121fb0cb 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -641,6 +641,21 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_yesncq }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_yesncq }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_yesncq }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_yesncq }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_yesncq }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_yesncq }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_yesncq }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_yesncq }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_yesncq }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_yesncq }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_yesncq }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_yesncq }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_yesncq }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_yesncq }, /* Linux ID */ + { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_yesncq }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */ -- cgit v1.2.3 From 9deb343189b3cf45e84dd08480f330575ffe2004 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 16 Mar 2010 09:50:26 +0900 Subject: ahci: use BIOS date in broken_suspend list HP is recycling both DMI_PRODUCT_NAME and DMI_BIOS_VERSION making ahci_broken_suspend() trigger for later products which are not affected by the original problems. Match BIOS date instead of version and add references to bko's so that full information can be found easier later. This fixes http://bugzilla.kernel.org/show_bug.cgi?id=15462 Signed-off-by: Tejun Heo Reported-by: tigerfishdaisy@gmail.com Signed-off-by: Jeff Garzik --- drivers/ata/ahci.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 9e5b121fb0cb..e7e2c7a147f1 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -3037,6 +3037,14 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) * On HP dv[4-6] and HDX18 with earlier BIOSen, link * to the harddisk doesn't become online after * resuming from STR. Warn and fail suspend. + * + * http://bugzilla.kernel.org/show_bug.cgi?id=12276 + * + * Use dates instead of versions to match as HP is + * apparently recycling both product and version + * strings. 
+ * + * http://bugzilla.kernel.org/show_bug.cgi?id=15462 */ { .ident = "dv4", @@ -3045,7 +3053,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"), }, - .driver_data = "F.30", /* cutoff BIOS version */ + .driver_data = "20090105", /* F.30 */ }, { .ident = "dv5", @@ -3054,7 +3062,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv5 Notebook PC"), }, - .driver_data = "F.16", /* cutoff BIOS version */ + .driver_data = "20090506", /* F.16 */ }, { .ident = "dv6", @@ -3063,7 +3071,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"), }, - .driver_data = "F.21", /* cutoff BIOS version */ + .driver_data = "20090423", /* F.21 */ }, { .ident = "HDX18", @@ -3072,7 +3080,7 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) DMI_MATCH(DMI_PRODUCT_NAME, "HP HDX18 Notebook PC"), }, - .driver_data = "F.23", /* cutoff BIOS version */ + .driver_data = "20090430", /* F.23 */ }, /* * Acer eMachines G725 has the same problem. BIOS @@ -3080,6 +3088,8 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) * work. Inbetween, there are V1.06, V2.06 and V3.03 * that we don't have much idea about. For now, * blacklist anything older than V3.04. + * + * http://bugzilla.kernel.org/show_bug.cgi?id=15104 */ { .ident = "G725", @@ -3087,19 +3097,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) DMI_MATCH(DMI_SYS_VENDOR, "eMachines"), DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"), }, - .driver_data = "V3.04", /* cutoff BIOS version */ + .driver_data = "20091216", /* V3.04 */ }, { } /* terminate list */ }; const struct dmi_system_id *dmi = dmi_first_match(sysids); - const char *ver; + int year, month, date; + char buf[9]; if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2)) return false; - ver = dmi_get_system_info(DMI_BIOS_VERSION); + dmi_get_date(DMI_BIOS_DATE, &year, &month, &date); + snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date); - return !ver || strcmp(ver, dmi->driver_data) < 0; + return strcmp(buf, dmi->driver_data) < 0; } static bool ahci_broken_online(struct pci_dev *pdev) -- cgit v1.2.3 From 5db5b0215af94a36d4bf10900ff9707b6d5c1610 Mon Sep 17 00:00:00 2001 From: Shane Huang Date: Tue, 16 Mar 2010 18:08:55 +0800 Subject: ahci: pp->active_link is not reliable when FBS is enabled pp->active_link is not reliable when FBS is enabled. Both PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because mixed NCQ and non-NCQ commands may be in flight. 
Signed-off-by: Shane Huang Signed-off-by: Jeff Garzik --- drivers/ata/ahci.c | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index e7e2c7a147f1..fdc9bcbe55a2 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -2278,7 +2278,7 @@ static void ahci_port_intr(struct ata_port *ap) struct ahci_port_priv *pp = ap->private_data; struct ahci_host_priv *hpriv = ap->host->private_data; int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING); - u32 status, qc_active; + u32 status, qc_active = 0; int rc; status = readl(port_mmio + PORT_IRQ_STAT); @@ -2336,11 +2336,22 @@ static void ahci_port_intr(struct ata_port *ap) } } - /* pp->active_link is valid iff any command is in flight */ - if (ap->qc_active && pp->active_link->sactive) - qc_active = readl(port_mmio + PORT_SCR_ACT); - else - qc_active = readl(port_mmio + PORT_CMD_ISSUE); + /* pp->active_link is not reliable once FBS is enabled, both + * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because + * NCQ and non-NCQ commands may be in flight at the same time. + */ + if (pp->fbs_enabled) { + if (ap->qc_active) { + qc_active = readl(port_mmio + PORT_SCR_ACT); + qc_active |= readl(port_mmio + PORT_CMD_ISSUE); + } + } else { + /* pp->active_link is valid iff any command is in flight */ + if (ap->qc_active && pp->active_link->sactive) + qc_active = readl(port_mmio + PORT_SCR_ACT); + else + qc_active = readl(port_mmio + PORT_CMD_ISSUE); + } rc = ata_qc_complete_multiple(ap, qc_active); -- cgit v1.2.3 From e9e58a4ec3b1086d1ed8c915311aef1ae55454fd Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Mon, 15 Mar 2010 00:34:57 -0400 Subject: memcg: avoid use cmpxchg in swap cgroup maintainance swap_cgroup uses 2bytes data and uses cmpxchg in a new operation. 2byte cmpxchg/xchg is not available on some archs. This patch replaces cmpxchg/xchg with operations under lock. 
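As an illustrative aside (not part of the patch, hypothetical names): the technique generalizes to any width for which no native cmpxchg exists; an ordinary compare-and-set performed under a spinlock gives the same semantics at the cost of a lock round trip. A minimal sketch:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(slot_lock);

    /* compare-and-set on a 16-bit slot: returns the old value if it matched
     * 'old' (and the new value was stored), or 0 if it did not match */
    static unsigned short slot_cmpxchg(unsigned short *slot,
                                       unsigned short old, unsigned short new)
    {
        unsigned long flags;
        unsigned short ret;

        spin_lock_irqsave(&slot_lock, flags);
        ret = *slot;
        if (ret == old)
            *slot = new;
        else
            ret = 0;
        spin_unlock_irqrestore(&slot_lock, flags);
        return ret;
    }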
Signed-off-by: KAMEZAWA Hiroyuki Reported-by: Sachin Sant wrote: Acked-by: Balbir Singh Acked-by: Daisuke Nishimura Cc: Li Zefan Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_cgroup.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 3dd88539a0e6..6c0081441a32 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -284,6 +284,7 @@ static DEFINE_MUTEX(swap_cgroup_mutex); struct swap_cgroup_ctrl { struct page **map; unsigned long length; + spinlock_t lock; }; struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; @@ -353,16 +354,22 @@ unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, struct swap_cgroup_ctrl *ctrl; struct page *mappage; struct swap_cgroup *sc; + unsigned long flags; + unsigned short retval; ctrl = &swap_cgroup_ctrl[type]; mappage = ctrl->map[idx]; sc = page_address(mappage); sc += pos; - if (cmpxchg(&sc->id, old, new) == old) - return old; + spin_lock_irqsave(&ctrl->lock, flags); + retval = sc->id; + if (retval == old) + sc->id = new; else - return 0; + retval = 0; + spin_unlock_irqrestore(&ctrl->lock, flags); + return retval; } /** @@ -383,13 +390,17 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) struct page *mappage; struct swap_cgroup *sc; unsigned short old; + unsigned long flags; ctrl = &swap_cgroup_ctrl[type]; mappage = ctrl->map[idx]; sc = page_address(mappage); sc += pos; - old = xchg(&sc->id, id); + spin_lock_irqsave(&ctrl->lock, flags); + old = sc->id; + sc->id = id; + spin_unlock_irqrestore(&ctrl->lock, flags); return old; } @@ -441,6 +452,7 @@ int swap_cgroup_swapon(int type, unsigned long max_pages) mutex_lock(&swap_cgroup_mutex); ctrl->length = length; ctrl->map = array; + spin_lock_init(&ctrl->lock); if (swap_cgroup_prepare(type)) { /* memory shortage */ ctrl->map = NULL; -- cgit v1.2.3 From bcc54e2a6d8e93ff83ec398511930b0a73e19151 Mon Sep 17 00:00:00 2001 From: Venkatesh Pallipadi Date: Mon, 15 Mar 2010 00:34:59 -0400 Subject: jffs2: fix up rb_root initializations to use RB_ROOT jffs2 uses rb_node = NULL; to zero rb_root. The problem with this is that 17d9ddc72fb8bba0d4f678 ("rbtree: Add support for augmented rbtrees") in the linux-next tree adds a new field to that struct which needs to be NULL as well. This patch uses RB_ROOT as the intializer so all of the relevant fields will be NULL'd. Signed-off-by: Venkatesh Pallipadi Cc: Eric Paris Acked-by: David Woodhouse Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/jffs2/readinode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index e22de8397b74..d32ee9412cb9 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c @@ -567,7 +567,7 @@ static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) else BUG(); } } - list->rb_node = NULL; + *list = RB_ROOT; } static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd) -- cgit v1.2.3 From c4af96449e20f9245cf3d904098db508cdebcda8 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Mon, 15 Mar 2010 00:35:00 -0400 Subject: ntfs: use bitmap_weight Use bitmap_weight() instead of doing hweight32() for each u32 element in the page. 
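For illustration only (a fragment, not taken from the patch; it assumes kaddr points at one page worth of bitmap and uses PAGE_SIZE rather than ntfs's PAGE_CACHE_SIZE), the change boils down to replacing a per-word popcount loop with a single bitmap_weight() call over the whole buffer:

    /* before: count set bits one 32-bit word at a time */
    for (i = 0; i < PAGE_SIZE / 4; i++)
        nr_free -= (s64)hweight32(kaddr32[i]);

    /* after: one call over the whole page; kaddr is an unsigned long pointer */
    nr_free -= bitmap_weight(kaddr, PAGE_SIZE * BITS_PER_BYTE);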
Signed-off-by: Akinobu Mita Cc: Anton Altaparmakov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ntfs/super.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 1cf39dfaee7a..0de1db6cddbf 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -31,6 +31,7 @@ #include #include #include +#include #include "sysctl.h" #include "logfile.h" @@ -2458,7 +2459,6 @@ static void ntfs_put_super(struct super_block *sb) static s64 get_nr_free_clusters(ntfs_volume *vol) { s64 nr_free = vol->nr_clusters; - u32 *kaddr; struct address_space *mapping = vol->lcnbmp_ino->i_mapping; struct page *page; pgoff_t index, max_index; @@ -2477,7 +2477,8 @@ static s64 get_nr_free_clusters(ntfs_volume *vol) ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.", max_index, PAGE_CACHE_SIZE / 4); for (index = 0; index < max_index; index++) { - unsigned int i; + unsigned long *kaddr; + /* * Read the page from page cache, getting it from backing store * if necessary, and increment the use count. @@ -2490,16 +2491,16 @@ static s64 get_nr_free_clusters(ntfs_volume *vol) nr_free -= PAGE_CACHE_SIZE * 8; continue; } - kaddr = (u32*)kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page, KM_USER0); /* - * For each 4 bytes, subtract the number of set bits. If this + * Subtract the number of set bits. If this * is the last page and it is partial we don't really care as * it just means we do a little extra work but it won't affect * the result as all out of range bytes are set to zero by * ntfs_readpage(). */ - for (i = 0; i < PAGE_CACHE_SIZE / 4; i++) - nr_free -= (s64)hweight32(kaddr[i]); + nr_free -= bitmap_weight(kaddr, + PAGE_CACHE_SIZE * BITS_PER_BYTE); kunmap_atomic(kaddr, KM_USER0); page_cache_release(page); } @@ -2538,7 +2539,6 @@ static s64 get_nr_free_clusters(ntfs_volume *vol) static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, s64 nr_free, const pgoff_t max_index) { - u32 *kaddr; struct address_space *mapping = vol->mftbmp_ino->i_mapping; struct page *page; pgoff_t index; @@ -2548,7 +2548,8 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = " "0x%lx.", max_index, PAGE_CACHE_SIZE / 4); for (index = 0; index < max_index; index++) { - unsigned int i; + unsigned long *kaddr; + /* * Read the page from page cache, getting it from backing store * if necessary, and increment the use count. @@ -2561,16 +2562,16 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, nr_free -= PAGE_CACHE_SIZE * 8; continue; } - kaddr = (u32*)kmap_atomic(page, KM_USER0); + kaddr = kmap_atomic(page, KM_USER0); /* - * For each 4 bytes, subtract the number of set bits. If this + * Subtract the number of set bits. If this * is the last page and it is partial we don't really care as * it just means we do a little extra work but it won't affect * the result as all out of range bytes are set to zero by * ntfs_readpage(). */ - for (i = 0; i < PAGE_CACHE_SIZE / 4; i++) - nr_free -= (s64)hweight32(kaddr[i]); + nr_free -= bitmap_weight(kaddr, + PAGE_CACHE_SIZE * BITS_PER_BYTE); kunmap_atomic(kaddr, KM_USER0); page_cache_release(page); } -- cgit v1.2.3 From e5d6151115aee73825c1752aff7cd09adfece839 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Mon, 15 Mar 2010 00:35:01 -0400 Subject: hpet: use for_each_set_bit() Replace open-coded loop with for_each_set_bit(). 
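For illustration (not part of the patch; do_something() is a hypothetical stand-in), for_each_set_bit() is simply shorthand for the find_first_bit()/find_next_bit() idiom it replaces:

    unsigned long mask = 0xf0f0;
    unsigned int bit;

    /* open-coded form */
    for (bit = find_first_bit(&mask, BITS_PER_LONG);
         bit < BITS_PER_LONG;
         bit = find_next_bit(&mask, BITS_PER_LONG, bit + 1))
        do_something(bit);

    /* equivalent, using the helper from <linux/bitops.h> */
    for_each_set_bit(bit, &mask, BITS_PER_LONG)
        do_something(bit);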
Signed-off-by: Akinobu Mita Cc: Clemens Ladisch Cc: Bob Picco Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/hpet.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index e481c5938bad..9c5eea3ea4de 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -215,9 +215,7 @@ static void hpet_timer_set_irq(struct hpet_dev *devp) else v &= ~0xffff; - for (irq = find_first_bit(&v, HPET_MAX_IRQ); irq < HPET_MAX_IRQ; - irq = find_next_bit(&v, HPET_MAX_IRQ, 1 + irq)) { - + for_each_set_bit(irq, &v, HPET_MAX_IRQ) { if (irq >= nr_irqs) { irq = HPET_MAX_IRQ; break; -- cgit v1.2.3 From bc32df00894f0e1dbf583cc3dab210d2969b078a Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 15 Mar 2010 00:35:03 -0400 Subject: memory hotplug: allow setting of phys_device /sys/devices/system/memory/memoryX/phys_device is supposed to contain the number of the physical device that the corresponding piece of memory belongs to. In case a physical device should be replaced or taken offline for whatever reason it is necessary to set all corresponding memory pieces offline. The current implementation always sets phys_device to '0' and there is no way or hook to change that. Seems like there was a plan to implement that but it wasn't finished for whatever reason. So add a weak function which architectures can override to actually set the phys_device from within add_memory_block(). Signed-off-by: Heiko Carstens Cc: Dave Hansen Cc: Gerald Schaefer Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/memory.c | 15 ++++++++++----- include/linux/memory.h | 2 ++ 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 2f8691511190..db0848e54cc6 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -429,12 +429,16 @@ static inline int memory_fail_init(void) * differentiation between which *physical* devices each * section belongs to... 
*/ +int __weak arch_get_memory_phys_device(unsigned long start_pfn) +{ + return 0; +} static int add_memory_block(int nid, struct mem_section *section, - unsigned long state, int phys_device, - enum mem_add_context context) + unsigned long state, enum mem_add_context context) { struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL); + unsigned long start_pfn; int ret = 0; if (!mem) @@ -443,7 +447,8 @@ static int add_memory_block(int nid, struct mem_section *section, mem->phys_index = __section_nr(section); mem->state = state; mutex_init(&mem->state_mutex); - mem->phys_device = phys_device; + start_pfn = section_nr_to_pfn(mem->phys_index); + mem->phys_device = arch_get_memory_phys_device(start_pfn); ret = register_memory(mem, section); if (!ret) @@ -515,7 +520,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section, */ int register_new_memory(int nid, struct mem_section *section) { - return add_memory_block(nid, section, MEM_OFFLINE, 0, HOTPLUG); + return add_memory_block(nid, section, MEM_OFFLINE, HOTPLUG); } int unregister_memory_section(struct mem_section *section) @@ -548,7 +553,7 @@ int __init memory_dev_init(void) if (!present_section_nr(i)) continue; err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, - 0, BOOT); + BOOT); if (!ret) ret = err; } diff --git a/include/linux/memory.h b/include/linux/memory.h index 1adfe779eb99..85582e1bcee9 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -36,6 +36,8 @@ struct memory_block { struct sys_device sysdev; }; +int arch_get_memory_phys_device(unsigned long start_pfn); + /* These states are exposed to userspace as text strings in sysfs */ #define MEM_ONLINE (1<<0) /* exposed to userspace */ #define MEM_GOING_OFFLINE (1<<1) /* exposed to userspace */ -- cgit v1.2.3 From 57b552ba0b2faf7cce66d476ef8ce7f6210c62fd Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 15 Mar 2010 00:35:05 -0400 Subject: memory hotplug/s390: set phys_device Implement arch specific arch_get_memory_phys_device function and initialize phys_device for each memory section. That way we finally can tell which piece of memory belongs to which physical device. This makes s390's /sys/devices/system/memory/memoryX/phys_device display the correct thing? Signed-off-by: Heiko Carstens Cc: Dave Hansen Cc: Gerald Schaefer Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/s390/char/sclp_cmd.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index b3beab610da4..fc7ae05ce48a 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -704,6 +704,13 @@ int sclp_chp_deconfigure(struct chp_id chpid) return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8); } +int arch_get_memory_phys_device(unsigned long start_pfn) +{ + if (!rzm) + return 0; + return PFN_PHYS(start_pfn) / rzm; +} + struct chp_info_sccb { struct sccb_header header; u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; -- cgit v1.2.3 From 1976152fd8e706135deed6cf333e347c08416056 Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Thu, 18 Mar 2010 07:30:31 -0600 Subject: of: Fix comparison of "compatible" properties Commit 7c7b60cb87547b1664a4385c187f029bf514a737 "of: put default string compare and #a/s-cell values into common header" Breaks various things on powerpc due to using strncasecmp instead of strcasecmp for comparing against "compatible" strings. 
This causes things like the 4xx PCI code to fail miserably due to the partial matches in code like this: for_each_compatible_node(np, NULL, "ibm,plb-pcix") ppc4xx_probe_pcix_bridge(np); for_each_compatible_node(np, NULL, "ibm,plb-pci") ppc4xx_probe_pci_bridge(np); It's not quite right to do a partial name match. Entries in a compatible list are meant to be matched whole. If a device is compatible with both "foo" and "foo1", then the device should have both strings in its "compatible" property. This patch reverts powerpc and microblaze to using strcasecmp. Signed-off-by: Benjamin Herrenschmidt (for patch description) Signed-off-by: Grant Likely Acked-by: David S. Miller Acked-by: Michal Simek --- include/linux/of.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/of.h b/include/linux/of.h index f6d9cbc39c9c..a367e19bb3af 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -127,7 +127,7 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) /* Default string compare functions, Allow arch asm/prom.h to override */ #if !defined(of_compat_cmp) -#define of_compat_cmp(s1, s2, l) strncasecmp((s1), (s2), (l)) +#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2)) #define of_prop_cmp(s1, s2) strcmp((s1), (s2)) #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) #endif -- cgit v1.2.3 From 9205124c66a69664c6825501cb062ebd2acd3b3a Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Thu, 18 Mar 2010 14:01:18 -0600 Subject: powerpc/5200: Build fix for mpc52xx watchdog timer code mpc52xx_gpt_wdt_setup is defined as 0, which causes the following build failure with gcc 4.5, since it's built with -Werror. arch/powerpc/platforms/52xx/mpc52xx_gpt.c:761:3: error: statement with no effect Changing it to a static inline fixes the problem. Reported-by: Jeff Mahoney Signed-off-by: Grant Likely --- arch/powerpc/platforms/52xx/mpc52xx_gpt.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index 072b948b2e2d..5d7cc88dae6b 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c @@ -711,7 +711,11 @@ static int __devinit mpc52xx_gpt_wdt_init(void) return 0; } -#define mpc52xx_gpt_wdt_setup(x, y) (0) +static inline int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt, + const u32 *period) +{ + return 0; +} #endif /* CONFIG_MPC5200_WDT */ -- cgit v1.2.3 From abc6e1341bda974e2d0eddb75f57a20ac18e9b33 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 18 Mar 2010 12:10:08 -0400 Subject: Btrfs: fix key checks and advance in the search ioctl The search ioctl was working well for finding tree roots, but using it for generic searches requires a few changes to how the keys are advanced. This treats the search control min fields for objectid, type and offset more like a key, where we drop the offset to zero once we bump the type, etc. The downside of this is that we are changing the min_type and min_offset fields during the search, and so the ioctl caller needs extra checks to make sure the keys in the result are the ones it wanted. This also changes key_in_sk to use btrfs_comp_cpu_keys, just to make things more readable.
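To make the "more like a key" wording concrete (an illustrative sketch, not code from the patch; kernel-style u64/u8 types assumed): the advance behaves like an odometer over (objectid, type, offset), where offset increments first and each carry into type or objectid resets the lower fields to zero.

    struct search_key { u64 objectid; u8 type; u64 offset; };

    /* returns 0 if the key was advanced, 1 if the search range is exhausted */
    static int advance_key(struct search_key *k, const struct search_key *max)
    {
        if (k->offset < (u64)-1 && k->offset < max->offset) {
            k->offset++;
        } else if (k->type < (u8)-1 && k->type < max->type) {
            k->offset = 0;
            k->type++;
        } else if (k->objectid < (u64)-1 && k->objectid < max->objectid) {
            k->offset = 0;
            k->type = 0;
            k->objectid++;
        } else {
            return 1;
        }
        return 0;
    }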
Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 4329610b141b..291aa51ff420 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -914,17 +914,23 @@ out: static noinline int key_in_sk(struct btrfs_key *key, struct btrfs_ioctl_search_key *sk) { - if (key->objectid < sk->min_objectid) - return 0; - if (key->offset < sk->min_offset) - return 0; - if (key->type < sk->min_type) - return 0; - if (key->objectid > sk->max_objectid) - return 0; - if (key->type > sk->max_type) + struct btrfs_key test; + int ret; + + test.objectid = sk->min_objectid; + test.type = sk->min_type; + test.offset = sk->min_offset; + + ret = btrfs_comp_cpu_keys(key, &test); + if (ret < 0) return 0; - if (key->offset > sk->max_offset) + + test.objectid = sk->max_objectid; + test.type = sk->max_type; + test.offset = sk->max_offset; + + ret = btrfs_comp_cpu_keys(key, &test); + if (ret > 0) return 0; return 1; } @@ -998,13 +1004,18 @@ static noinline int copy_to_sk(struct btrfs_root *root, break; } advance_key: - if (key->offset < (u64)-1) + ret = 0; + if (key->offset < (u64)-1 && key->offset < sk->max_offset) key->offset++; - else if (key->type < (u8)-1) + else if (key->type < (u8)-1 && key->type < sk->max_type) { + key->offset = 0; key->type++; - else if (key->objectid < (u64)-1) + } else if (key->objectid < (u64)-1 && key->objectid < sk->max_objectid) { + key->offset = 0; + key->type = 0; key->objectid++; - ret = 0; + } else + ret = 1; overflow: *num_found += found; return ret; -- cgit v1.2.3 From 90fdde147fd32d18a20be5b498d5f26e56cca8a3 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 18 Mar 2010 12:14:54 -0400 Subject: Btrfs: return keys for large items to the search ioctl The search ioctl was skipping large items entirely (ones that are too big for the results buffer). This changes things to at least copy the item header so that we can send information about the item back to userland. Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 291aa51ff420..fd757f576956 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -997,8 +997,8 @@ static noinline int copy_to_sk(struct btrfs_root *root, read_extent_buffer(leaf, p, item_off, item_len); *sk_offset += item_len; - found++; } + found++; if (*num_found >= sk->nr_items) break; -- cgit v1.2.3 From 1b53ac4d1b75b23bdc2b54ace787b8f718a987ef Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 18 Mar 2010 12:17:05 -0400 Subject: Btrfs: allow treeid==0 in the inode lookup ioctl When a root id of 0 is sent to the inode lookup ioctl, it will use the root of the file we're ioctling and pass the root id back to userland along with the results. This allows userland to do searches based on that root later on. 
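From userspace the new behaviour can be pictured like this (an illustrative sketch, not part of the patch; the ioctl and struct field names are the ones used by this series and assume the btrfs ioctl definitions are visible to the caller): passing treeid = 0 means "the subvolume that fd lives in", and the kernel writes the resolved root id back into the same field.

    struct btrfs_ioctl_ino_lookup_args args;

    memset(&args, 0, sizeof(args));
    args.treeid = 0;          /* 0: let the kernel use the root of 'fd' */
    args.objectid = ino;      /* inode number to resolve into a path */

    if (ioctl(fd, BTRFS_IOC_INO_LOOKUP, &args) == 0)
        printf("root %llu, path %s\n",
               (unsigned long long)args.treeid, args.name);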
Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index fd757f576956..1e462de6556e 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1215,6 +1215,9 @@ static noinline int btrfs_ioctl_ino_lookup(struct file *file, } inode = fdentry(file)->d_inode; + if (args->treeid == 0) + args->treeid = BTRFS_I(inode)->root->root_key.objectid; + ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info, args->treeid, args->objectid, args->name); -- cgit v1.2.3 From 8ad6fcab564c5bc956bdc3dfa440ab152b6e780f Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 18 Mar 2010 12:23:10 -0400 Subject: Btrfs: fix the inode ref searches done by btrfs_search_path_in_tree This is used by the inode lookup ioctl to follow all the backrefs up to the subvol root. But the search being done would sometimes land one past the last item in the leaf instead of finding the backref. This changes the search to look for the highest possible backref and hop back one item. It also fixes a leaked path on failure to find the root. Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 1e462de6556e..2845c6ceecd2 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1147,12 +1147,13 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, root = btrfs_read_fs_root_no_name(info, &key); if (IS_ERR(root)) { printk(KERN_ERR "could not find root %llu\n", tree_id); - return -ENOENT; + ret = -ENOENT; + goto out; } key.objectid = dirid; key.type = BTRFS_INODE_REF_KEY; - key.offset = 0; + key.offset = (u64)-1; while(1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); @@ -1161,6 +1162,8 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, l = path->nodes[0]; slot = path->slots[0]; + if (ret > 0 && slot > 0) + slot--; btrfs_item_key_to_cpu(l, &key, slot); if (ret > 0 && (key.objectid != dirid || @@ -1184,7 +1187,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, btrfs_release_path(root, path); key.objectid = key.offset; - key.offset = 0; + key.offset = (u64)-1; dirid = key.objectid; } -- cgit v1.2.3 From 2a238a96358dde09aa52de9b9f97a383165ca651 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Mon, 1 Mar 2010 13:25:49 -0500 Subject: alpha: use __ratelimit Replace open-coded rate limiting logic with __ratelimit(). Signed-off-by: Akinobu Mita Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: linux-alpha@vger.kernel.org Signed-off-by: Matt Turner --- arch/alpha/kernel/traps.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 6ee7655b7568..b14f015008ad 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -771,8 +772,7 @@ asmlinkage void do_entUnaUser(void __user * va, unsigned long opcode, unsigned long reg, struct pt_regs *regs) { - static int cnt = 0; - static unsigned long last_time; + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); unsigned long tmp1, tmp2, tmp3, tmp4; unsigned long fake_reg, *reg_addr = &fake_reg; @@ -783,15 +783,11 @@ do_entUnaUser(void __user * va, unsigned long opcode, with the unaliged access. 
*/ if (!test_thread_flag (TIF_UAC_NOPRINT)) { - if (cnt >= 5 && time_after(jiffies, last_time + 5 * HZ)) { - cnt = 0; - } - if (++cnt < 5) { + if (__ratelimit(&ratelimit)) { printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n", current->comm, task_pid_nr(current), regs->pc - 4, va, opcode, reg); } - last_time = jiffies; } if (test_thread_flag (TIF_UAC_SIGBUS)) goto give_sigbus; -- cgit v1.2.3 From 7f2d889c9cdcd33e8942a23765623adbd468cf04 Mon Sep 17 00:00:00 2001 From: Frans Pop Date: Mon, 1 Mar 2010 13:29:14 -0500 Subject: alpha: remove trailing spaces in messages Signed-off-by: Frans Pop Cc: linux-alpha@vger.kernel.org Signed-off-by: Matt Turner --- arch/alpha/kernel/sys_dp264.c | 2 +- arch/alpha/kernel/sys_titan.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c index d64e1e497e76..4026502ab707 100644 --- a/arch/alpha/kernel/sys_dp264.c +++ b/arch/alpha/kernel/sys_dp264.c @@ -224,7 +224,7 @@ static void dp264_device_interrupt(unsigned long vector) { #if 1 - printk("dp264_device_interrupt: NOT IMPLEMENTED YET!! \n"); + printk("dp264_device_interrupt: NOT IMPLEMENTED YET!!\n"); #else unsigned long pld; unsigned int i; diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index 288053342c83..9008d0f20c53 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c @@ -171,7 +171,7 @@ titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) static void titan_device_interrupt(unsigned long vector) { - printk("titan_device_interrupt: NOT IMPLEMENTED YET!! \n"); + printk("titan_device_interrupt: NOT IMPLEMENTED YET!!\n"); } static void -- cgit v1.2.3 From 5286944d75cbe21dd21b0d57335cc6206622b62a Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Thu, 18 Mar 2010 22:35:28 -0400 Subject: alpha: fix compile errors in dma-mapping-common.h The commit c186caca3dbe7f44da624cb4f9d78e1b1dfb13b8 leads to the following errors with CONFIG_ALPHA_{TSUNAMI, TITAN, RAWHIDE, MARVEL}: /home/fujita/git/linux-2.6/include/asm-generic/dma-mapping-common.h: In function `dma_map_sg_attrs': /home/fujita/git/linux-2.6/include/asm-generic/dma-mapping-common.h:49: error: implicit declaration of function `for_each_sg' /home/fujita/git/linux-2.6/include/asm-generic/dma-mapping-common.h:50: error: syntax error before "kmemcheck_mark_initialized" Signed-off-by: FUJITA Tomonori Reported-by: Michael Cree Signed-off-by: Matt Turner --- arch/alpha/include/asm/core_marvel.h | 1 - arch/alpha/include/asm/core_mcpcia.h | 1 - arch/alpha/include/asm/core_titan.h | 1 - arch/alpha/include/asm/core_tsunami.h | 1 - 4 files changed, 4 deletions(-) diff --git a/arch/alpha/include/asm/core_marvel.h b/arch/alpha/include/asm/core_marvel.h index 30d55fe7aaf6..dad300fa14ce 100644 --- a/arch/alpha/include/asm/core_marvel.h +++ b/arch/alpha/include/asm/core_marvel.h @@ -12,7 +12,6 @@ #define __ALPHA_MARVEL__H__ #include -#include #include #include diff --git a/arch/alpha/include/asm/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h index acf55b483472..21ac53383b37 100644 --- a/arch/alpha/include/asm/core_mcpcia.h +++ b/arch/alpha/include/asm/core_mcpcia.h @@ -6,7 +6,6 @@ #define MCPCIA_ONE_HAE_WINDOW 1 #include -#include #include /* diff --git a/arch/alpha/include/asm/core_titan.h b/arch/alpha/include/asm/core_titan.h index a17f6f33b68e..8cf79d1219e1 100644 --- a/arch/alpha/include/asm/core_titan.h +++ b/arch/alpha/include/asm/core_titan.h @@ -2,7 +2,6 @@ #define __ALPHA_TITAN__H__ #include -#include 
#include /* diff --git a/arch/alpha/include/asm/core_tsunami.h b/arch/alpha/include/asm/core_tsunami.h index 58d4fe48742c..8e39ecf09419 100644 --- a/arch/alpha/include/asm/core_tsunami.h +++ b/arch/alpha/include/asm/core_tsunami.h @@ -2,7 +2,6 @@ #define __ALPHA_TSUNAMI__H__ #include -#include #include /* -- cgit v1.2.3 From 409d241b7bb2cf0278186040ace1c4704fb2a82f Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Fri, 12 Mar 2010 13:16:02 +0000 Subject: powerpc: Use correct ccr bit for syscall error status The powerpc implementations of syscall_get_error and syscall_set_return_value should use CCR0:S0 (0x10000000) for testing and setting syscall error status. Fortunately these APIs don't seem to be used at the moment. Signed-off-by: Nathan Lynch Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/syscall.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index efa7f0b879f3..23913e902fc3 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h @@ -30,7 +30,7 @@ static inline void syscall_rollback(struct task_struct *task, static inline long syscall_get_error(struct task_struct *task, struct pt_regs *regs) { - return (regs->ccr & 0x1000) ? -regs->gpr[3] : 0; + return (regs->ccr & 0x10000000) ? -regs->gpr[3] : 0; } static inline long syscall_get_return_value(struct task_struct *task, @@ -44,10 +44,10 @@ static inline void syscall_set_return_value(struct task_struct *task, int error, long val) { if (error) { - regs->ccr |= 0x1000L; + regs->ccr |= 0x10000000L; regs->gpr[3] = -error; } else { - regs->ccr &= ~0x1000L; + regs->ccr &= ~0x10000000L; regs->gpr[3] = val; } } -- cgit v1.2.3 From 09156a7a409cf93d1ca2706bf05d714879a5b7ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rton=20N=C3=A9meth?= Date: Sat, 6 Mar 2010 22:43:55 +0000 Subject: powerpc: Do not call prink when CONFIG_PRINTK is not defined MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When printk() is disabled (CONFIG_PRINTK) at menu item General setup -> Configure standard kernel features (for small systems) -> Enable support for printk then there should be no printk() calls at all. Signed-off-by: Márton Németh Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/head_fsl_booke.S | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index a7cf4934342c..725526547994 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -884,13 +884,17 @@ KernelSPE: lwz r3,_MSR(r1) oris r3,r3,MSR_SPE@h stw r3,_MSR(r1) /* enable use of SPE after return */ +#ifdef CONFIG_PRINTK lis r3,87f@h ori r3,r3,87f@l mr r4,r2 /* current */ lwz r5,_NIP(r1) bl printk +#endif b ret_from_except +#ifdef CONFIG_PRINTK 87: .string "SPE used in kernel (task=%p, pc=%x) \n" +#endif .align 4,0 #endif /* CONFIG_SPE */ -- cgit v1.2.3 From a93272969c6b1d59883fcbb04845420bd72c9a20 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Tue, 16 Mar 2010 13:16:25 +0000 Subject: powerpc: Fix swiotlb to respect the boot option powerpc initializes swiotlb before parsing the kernel boot options so swiotlb options (e.g. specifying the swiotlb buffer size) are ignored. Any time before freeing bootmem works for swiotlb so this patch moves powerpc's swiotlb initialization after parsing the kernel boot options, mem_init (as x86 does). 
Signed-off-by: FUJITA Tomonori Tested-by: Becky Bruce Tested-by: Albert Herranz Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/setup_32.c | 6 ------ arch/powerpc/kernel/setup_64.c | 6 ------ arch/powerpc/mm/mem.c | 6 ++++++ 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index b152de3e64d4..8f58986c2ad9 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -39,7 +39,6 @@ #include #include #include -#include #include "setup.h" @@ -343,11 +342,6 @@ void __init setup_arch(char **cmdline_p) ppc_md.setup_arch(); if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab); -#ifdef CONFIG_SWIOTLB - if (ppc_swiotlb_enable) - swiotlb_init(1); -#endif - paging_init(); /* Initialize the MMU context management stuff */ diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 63547394048c..914389158a9b 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -61,7 +61,6 @@ #include #include #include -#include #include #include "setup.h" @@ -541,11 +540,6 @@ void __init setup_arch(char **cmdline_p) if (ppc_md.setup_arch) ppc_md.setup_arch(); -#ifdef CONFIG_SWIOTLB - if (ppc_swiotlb_enable) - swiotlb_init(1); -#endif - paging_init(); /* Initialize the MMU context management stuff */ diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 311224cdb7ad..448f972b22f5 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -48,6 +48,7 @@ #include #include #include +#include #include "mmu_decl.h" @@ -320,6 +321,11 @@ void __init mem_init(void) struct page *page; unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; +#ifdef CONFIG_SWIOTLB + if (ppc_swiotlb_enable) + swiotlb_init(1); +#endif + num_physpages = lmb.memory.size >> PAGE_SHIFT; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); -- cgit v1.2.3 From 191aee58b6568cf8143901bfa3f57a9b8faa6f1c Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Tue, 2 Mar 2010 14:25:38 +0000 Subject: powerpc: Remove IOMMU_VMERGE config option The description says: Cause IO segments sent to a device for DMA to be merged virtually by the IOMMU when they happen to have been allocated contiguously. This doesn't add pressure to the IOMMU allocator. However, some drivers don't support getting large merged segments coming back from *_map_sg(). Most drivers don't have this problem; it is safe to say Y here. It's out of date. Long ago, drivers didn't have a way to tell IOMMUs about their segment length limit (that is, the maximum segment length that they can handle). So IOMMUs merged as many segments as possible and gave too large segments to drivers. dma_get_max_seg_size() was introduced to solve the above problem. Device drivers can use the API to tell the IOMMU about the maximum segment length that they can handle. In addition, the default limit (64K) should be safe for everyone. So this config option seems to be unnecessary. Note that this config option just enables users to disable the virtual merging by default. Users can still disable the virtual merging by the boot parameter.
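As an illustrative aside (not part of the patch): the mechanism that made the option redundant is the per-device DMA segment limit. A driver that cannot accept large merged segments advertises its limit once, and the DMA/IOMMU layer consults it (via dma_get_max_seg_size()) before merging sg entries. A hedged sketch from a hypothetical probe routine:

    /* this device cannot handle DMA segments larger than 64K */
    err = dma_set_max_seg_size(&pdev->dev, 65536);
    if (err)
        return err;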
Signed-off-by: FUJITA Tomonori Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/Kconfig | 13 ------------- arch/powerpc/kernel/iommu.c | 7 +------ 2 files changed, 1 insertion(+), 19 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 8a54eb8e3768..2e19500921f9 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -313,19 +313,6 @@ config 8XX_MINIMAL_FPEMU It is recommended that you build a soft-float userspace instead. -config IOMMU_VMERGE - bool "Enable IOMMU virtual merging" - depends on PPC64 - default y - help - Cause IO segments sent to a device for DMA to be merged virtually - by the IOMMU when they happen to have been allocated contiguously. - This doesn't add pressure to the IOMMU allocator. However, some - drivers don't support getting large merged segments coming back - from *_map_sg(). - - Most drivers don't have this problem; it is safe to say Y here. - config IOMMU_HELPER def_bool PPC64 diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 5547ae6e6b0b..ec94f906ea43 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -42,12 +42,7 @@ #define DBG(...) -#ifdef CONFIG_IOMMU_VMERGE -static int novmerge = 0; -#else -static int novmerge = 1; -#endif - +static int novmerge; static int protect4gb = 1; static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int); -- cgit v1.2.3 From e1955ca0ee55286cbc65a8ed7471d540ae83dac8 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Tue, 9 Mar 2010 19:30:28 +0100 Subject: sysfs: use sysfs_bin_attr_init in firmware class driver Annotate dynamic sysfs attribute in fw_setup_device(). This gets rid of the following lockdep warning: bnx2 0000:08:00.0: firmware: requesting bnx2/bnx2-mips-06-5.0.0.j6.fw BUG: key ffff880008293470 not in .data! ------------[ cut here ]------------ WARNING: at kernel/lockdep.c:2706 lockdep_init_map+0x562/0x620() Modules linked in: bnx2(+) sg tpm_bios floppy rtc_lib usb_storage i2c_piix4 joydev button container shpchp i2c_core sr_mod cdrom pci_hotplug usbhid hid ohci_hcd ehci_hcd sd_mod usbcore edd ext3 mbcache jbd fan ata_generic sata_svw pata_serverworks libata scsi_mod thermal processor Pid: 1915, comm: work_for_cpu Not tainted 2.6.34-rc1-default #81 Call Trace: [] ? lockdep_init_map+0x562/0x620 [] warn_slowpath_common+0x78/0xd0 [] warn_slowpath_null+0xf/0x20 [] lockdep_init_map+0x562/0x620 [] ? sysfs_new_dirent+0x76/0x120 [] ? put_device+0x12/0x20 [] sysfs_add_file_mode+0x6c/0xd0 [] sysfs_add_file+0xc/0x10 [] sysfs_create_bin_file+0x21/0x30 [] _request_firmware+0x2f1/0x650 [] request_firmware+0xe/0x10 [] bnx2_init_one+0x8f5/0x177e [bnx2] [] ? _raw_spin_unlock_irq+0x2b/0x40 [] ? finish_task_switch+0x69/0x100 [] ? finish_task_switch+0x0/0x100 [] ? do_work_for_cpu+0x0/0x30 [] local_pci_probe+0x12/0x20 [] do_work_for_cpu+0x13/0x30 [] ? do_work_for_cpu+0x0/0x30 [] kthread+0x96/0xa0 [] kernel_thread_helper+0x4/0x10 [] ? restore_args+0x0/0x30 [] ? kthread+0x0/0xa0 [] ? kernel_thread_helper+0x0/0x10 ---[ end trace a2ecee9c9602d195 ]--- Cc: Eric W. 
Biederman Cc: Greg Kroah-Hartman Signed-off-by: Jiri Kosina Signed-off-by: Greg Kroah-Hartman --- drivers/base/firmware_class.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index d0dc26ad5387..fc7565ce7e86 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -442,6 +442,7 @@ static int fw_setup_device(struct firmware *fw, struct device **dev_p, fw_priv = dev_get_drvdata(f_dev); fw_priv->fw = fw; + sysfs_bin_attr_init(&fw_priv->attr_data); retval = sysfs_create_bin_file(&f_dev->kobj, &fw_priv->attr_data); if (retval) { dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__); -- cgit v1.2.3 From 6757eca348fbbdd4ab1020e565f325cd6a6b2698 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Wed, 10 Mar 2010 22:48:34 +0000 Subject: sysfs: Initialised pci bus legacy_mem field before use PPC64 is failing to boot the latest mmotm due to an uninitialised pointer in pci_create_legacy_files(). The surprise is that machines boot at all and it would appear to affect current mainline as well. This patch fixes the problem. Signed-off-by: Mel Gorman Signed-off-by: Greg Kroah-Hartman --- drivers/pci/pci-sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index de296452c957..997668558e79 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -655,8 +655,8 @@ void pci_create_legacy_files(struct pci_bus *b) goto legacy_io_err; /* Allocated above after the legacy_io struct */ - sysfs_bin_attr_init(b->legacy_mem); b->legacy_mem = b->legacy_io + 1; + sysfs_bin_attr_init(b->legacy_mem); b->legacy_mem->attr.name = "legacy_mem"; b->legacy_mem->size = 1024*1024; b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; -- cgit v1.2.3 From c7df670bf702d1c25ae22b4cd49deb05c1e55ecc Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Mon, 15 Mar 2010 13:59:51 -0700 Subject: sysfs: fix sysfs lockdep warning in ipmi code This fixes a sysfs lockdep warning in the ipmi code. Thanks to Eric Biederman and Yinghai Lu for the original versions of the patch, unfortunately they did not submit them in a form they could be applied in.
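The fixes in this group all follow one rule, illustrated below with hypothetical names (a sketch, not code from any of the patches): an attribute that lives in dynamically allocated memory must be passed through sysfs_attr_init() (or sysfs_bin_attr_init() for binary attributes) before it is registered, so that lockdep is handed a valid static key.

    struct foo_device {
        struct device_attribute status_attr;
        /* ... */
    };

    static int foo_add_status_file(struct device *dev, struct foo_device *foo)
    {
        foo->status_attr.attr.name = "status";
        foo->status_attr.attr.mode = S_IRUGO;
        foo->status_attr.show = foo_status_show;    /* assumed to exist */
        sysfs_attr_init(&foo->status_attr.attr);    /* required: attr is not static */
        return device_create_file(dev, &foo->status_attr);
    }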
Cc: Yinghai Lu Cc: Eric Biederman Signed-off-by: Greg Kroah-Hartman --- drivers/char/ipmi/ipmi_msghandler.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index ec5e3f8df648..c6ad4234378d 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -2272,42 +2272,52 @@ static int create_files(struct bmc_device *bmc) bmc->device_id_attr.attr.name = "device_id"; bmc->device_id_attr.attr.mode = S_IRUGO; bmc->device_id_attr.show = device_id_show; + sysfs_attr_init(&bmc->device_id_attr.attr); bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs"; bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO; bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show; + sysfs_attr_init(&bmc->provides_dev_sdrs_attr.attr); bmc->revision_attr.attr.name = "revision"; bmc->revision_attr.attr.mode = S_IRUGO; bmc->revision_attr.show = revision_show; + sysfs_attr_init(&bmc->revision_attr.attr); bmc->firmware_rev_attr.attr.name = "firmware_revision"; bmc->firmware_rev_attr.attr.mode = S_IRUGO; bmc->firmware_rev_attr.show = firmware_rev_show; + sysfs_attr_init(&bmc->firmware_rev_attr.attr); bmc->version_attr.attr.name = "ipmi_version"; bmc->version_attr.attr.mode = S_IRUGO; bmc->version_attr.show = ipmi_version_show; + sysfs_attr_init(&bmc->version_attr.attr); bmc->add_dev_support_attr.attr.name = "additional_device_support"; bmc->add_dev_support_attr.attr.mode = S_IRUGO; bmc->add_dev_support_attr.show = add_dev_support_show; + sysfs_attr_init(&bmc->add_dev_support_attr.attr); bmc->manufacturer_id_attr.attr.name = "manufacturer_id"; bmc->manufacturer_id_attr.attr.mode = S_IRUGO; bmc->manufacturer_id_attr.show = manufacturer_id_show; + sysfs_attr_init(&bmc->manufacturer_id_attr.attr); bmc->product_id_attr.attr.name = "product_id"; bmc->product_id_attr.attr.mode = S_IRUGO; bmc->product_id_attr.show = product_id_show; + sysfs_attr_init(&bmc->product_id_attr.attr); bmc->guid_attr.attr.name = "guid"; bmc->guid_attr.attr.mode = S_IRUGO; bmc->guid_attr.show = guid_show; + sysfs_attr_init(&bmc->guid_attr.attr); bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision"; bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO; bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show; + sysfs_attr_init(&bmc->aux_firmware_rev_attr.attr); err = device_create_file(&bmc->dev->dev, &bmc->device_id_attr); -- cgit v1.2.3 From 21e3bde964e873bb5d3b1dfef68294b1437fe678 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Mon, 15 Mar 2010 14:01:25 -0700 Subject: sysfs: fix sysfs lockdep warning in infiniband code This fixes a sysfs lockdep warning in the infiniband code. Cc: Yinghai Lu Cc: Eric Biederman Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/sysfs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 1558bb7fc74d..f901957abc8b 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -461,6 +461,7 @@ alloc_group_attrs(ssize_t (*show)(struct ib_port *, element->attr.attr.mode = S_IRUGO; element->attr.show = show; element->index = i; + sysfs_attr_init(&element->attr.attr); tab_attr[i] = &element->attr.attr; } -- cgit v1.2.3 From 3691c964fa1a8f0eb5e5f00c644ef1bdd7e35a95 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Mon, 15 Mar 2010 14:01:55 -0700 Subject: sysfs: fix sysfs lockdep warning in mlx4 code This fixes a sysfs lockdep warning in the mlx4 code. 
Cc: Yinghai Lu Cc: Eric Biederman Signed-off-by: Greg Kroah-Hartman --- drivers/net/mlx4/main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 8f6e816a7395..b402a95c87c7 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -1023,6 +1023,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) info->port_attr.attr.mode = S_IRUGO | S_IWUSR; info->port_attr.show = show_port_type; info->port_attr.store = set_port_type; + sysfs_attr_init(&info->port_attr.attr); err = device_create_file(&dev->pdev->dev, &info->port_attr); if (err) { -- cgit v1.2.3 From 4d26e139f0b7d4c0700d6993506f1f60e2f2caa5 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 10 Mar 2010 20:50:38 +0900 Subject: Driver core: Early platform kernel-doc update This patch updates the kernel-doc notation for early platform functions. Signed-off-by: Magnus Damm Acked-by: Randy Dunlap Signed-off-by: Greg Kroah-Hartman --- drivers/base/platform.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 1ba9d617d241..f6bcf22e6bd1 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -1052,9 +1052,11 @@ static __initdata LIST_HEAD(early_platform_driver_list); static __initdata LIST_HEAD(early_platform_device_list); /** - * early_platform_driver_register + * early_platform_driver_register - register early platform driver * @epdrv: early_platform driver structure * @buf: string passed from early_param() + * + * Helper function for early_platform_init() / early_platform_init_buffer() */ int __init early_platform_driver_register(struct early_platform_driver *epdrv, char *buf) @@ -1106,9 +1108,12 @@ int __init early_platform_driver_register(struct early_platform_driver *epdrv, } /** - * early_platform_add_devices - add a numbers of early platform devices + * early_platform_add_devices - adds a number of early platform devices * @devs: array of early platform devices to add * @num: number of early platform devices in array + * + * Used by early architecture code to register early platform devices and + * their platform data. */ void __init early_platform_add_devices(struct platform_device **devs, int num) { @@ -1128,8 +1133,12 @@ void __init early_platform_add_devices(struct platform_device **devs, int num) } /** - * early_platform_driver_register_all + * early_platform_driver_register_all - register early platform drivers * @class_str: string to identify early platform driver class + * + * Used by architecture code to register all early platform drivers + * for a certain class. If omitted then only early platform drivers + * with matching kernel command line class parameters will be registered. 
*/ void __init early_platform_driver_register_all(char *class_str) { @@ -1151,7 +1160,7 @@ void __init early_platform_driver_register_all(char *class_str) } /** - * early_platform_match + * early_platform_match - find early platform device matching driver * @epdrv: early platform driver structure * @id: id to match against */ @@ -1169,7 +1178,7 @@ early_platform_match(struct early_platform_driver *epdrv, int id) } /** - * early_platform_left + * early_platform_left - check if early platform driver has matching devices * @epdrv: early platform driver structure * @id: return true if id or above exists */ @@ -1187,7 +1196,7 @@ static __init int early_platform_left(struct early_platform_driver *epdrv, } /** - * early_platform_driver_probe_id + * early_platform_driver_probe_id - probe drivers matching class_str and id * @class_str: string to identify early platform driver class * @id: id to match against * @nr_probe: number of platform devices to successfully probe before exiting @@ -1257,10 +1266,14 @@ static int __init early_platform_driver_probe_id(char *class_str, } /** - * early_platform_driver_probe + * early_platform_driver_probe - probe a class of registered drivers * @class_str: string to identify early platform driver class * @nr_probe: number of platform devices to successfully probe before exiting * @user_only: only probe user specified early platform devices + * + * Used by architecture code to probe registered early platform drivers + * within a certain class. For probe to happen a registered early platform + * device matching a registered early platform driver is needed. */ int __init early_platform_driver_probe(char *class_str, int nr_probe, -- cgit v1.2.3 From e59817bf089a3893e05a059026c668fb65f8c748 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 10 Mar 2010 11:47:58 -0800 Subject: driver-core: fix missing kernel-doc in firmware_class Fix kernel-doc warning in firmware_class.c: Warning(drivers/base/firmware_class.c:94): No description found for parameter 'attr' Signed-off-by: Randy Dunlap Signed-off-by: Greg Kroah-Hartman --- drivers/base/firmware_class.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index fc7565ce7e86..18518ba13c81 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -78,6 +78,7 @@ firmware_timeout_show(struct class *class, /** * firmware_timeout_store - set number of seconds to wait for firmware * @class: device class pointer + * @attr: device attribute pointer * @buf: buffer to scan for timeout value * @count: number of bytes in @buf * -- cgit v1.2.3 From 462bd295a3d74c7d1641501bda549bccf404be5b Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Thu, 11 Mar 2010 07:59:09 -0500 Subject: kobject: documentation: Fix erroneous example in kobject doc. Replace uio_mem example for kobjects with uio_map, since the uio_mem struct no longer contains a kobject. Signed-off-by: Robert P. J. Day Signed-off-by: Greg Kroah-Hartman --- Documentation/kobject.txt | 57 +++++++++++++++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 19 deletions(-) diff --git a/Documentation/kobject.txt b/Documentation/kobject.txt index bdb13817e1e9..668cb83d9561 100644 --- a/Documentation/kobject.txt +++ b/Documentation/kobject.txt @@ -59,37 +59,56 @@ nice to have in other objects. The C language does not allow for the direct expression of inheritance, so other techniques - such as structure embedding - must be used. 
-So, for example, the UIO code has a structure that defines the memory -region associated with a uio device: +(As an aside, for those familiar with the kernel linked list implementation, +this is analogous as to how "list_head" structs are rarely useful on +their own, but are invariably found embedded in the larger objects of +interest.) -struct uio_mem { +So, for example, the UIO code in drivers/uio/uio.c has a structure that +defines the memory region associated with a uio device: + + struct uio_map { struct kobject kobj; - unsigned long addr; - unsigned long size; - int memtype; - void __iomem *internal_addr; -}; + struct uio_mem *mem; + }; -If you have a struct uio_mem structure, finding its embedded kobject is +If you have a struct uio_map structure, finding its embedded kobject is just a matter of using the kobj member. Code that works with kobjects will often have the opposite problem, however: given a struct kobject pointer, what is the pointer to the containing structure? You must avoid tricks (such as assuming that the kobject is at the beginning of the structure) and, instead, use the container_of() macro, found in : - container_of(pointer, type, member) + container_of(pointer, type, member) + +where: + + * "pointer" is the pointer to the embedded kobject, + * "type" is the type of the containing structure, and + * "member" is the name of the structure field to which "pointer" points. + +The return value from container_of() is a pointer to the corresponding +container type. So, for example, a pointer "kp" to a struct kobject +embedded *within* a struct uio_map could be converted to a pointer to the +*containing* uio_map structure with: + + struct uio_map *u_map = container_of(kp, struct uio_map, kobj); + +For convenience, programmers often define a simple macro for "back-casting" +kobject pointers to the containing type. Exactly this happens in the +earlier drivers/uio/uio.c, as you can see here: + + struct uio_map { + struct kobject kobj; + struct uio_mem *mem; + }; -where pointer is the pointer to the embedded kobject, type is the type of -the containing structure, and member is the name of the structure field to -which pointer points. The return value from container_of() is a pointer to -the given type. So, for example, a pointer "kp" to a struct kobject -embedded within a struct uio_mem could be converted to a pointer to the -containing uio_mem structure with: + #define to_map(map) container_of(map, struct uio_map, kobj) - struct uio_mem *u_mem = container_of(kp, struct uio_mem, kobj); +where the macro argument "map" is a pointer to the struct kobject in +question. That macro is subsequently invoked with: -Programmers often define a simple macro for "back-casting" kobject pointers -to the containing type. 
+ struct uio_map *map = to_map(kobj); Initialization of kobjects -- cgit v1.2.3 From 67fc233f4fb67907861b4077cea5294035f80dc7 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 16 Mar 2010 10:33:32 +1100 Subject: sysdev: the cpu probe/release attributes should be sysdev_class_attributes This fixes these warnings: drivers/base/cpu.c:264: warning: initialization from incompatible pointer type drivers/base/cpu.c:265: warning: initialization from incompatible pointer type Cc: Andi Kleen Signed-off-by: Stephen Rothwell Signed-off-by: Greg Kroah-Hartman --- drivers/base/cpu.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 7036e8e96ab8..b5242e1e8bc4 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -79,24 +79,24 @@ void unregister_cpu(struct cpu *cpu) } #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE -static ssize_t cpu_probe_store(struct sys_device *dev, - struct sysdev_attribute *attr, - const char *buf, +static ssize_t cpu_probe_store(struct sysdev_class *class, + struct sysdev_class_attribute *attr, + const char *buf, size_t count) { return arch_cpu_probe(buf, count); } -static ssize_t cpu_release_store(struct sys_device *dev, - struct sysdev_attribute *attr, - const char *buf, +static ssize_t cpu_release_store(struct sysdev_class *class, + struct sysdev_class_attribute *attr, + const char *buf, size_t count) { return arch_cpu_release(buf, count); } -static SYSDEV_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); -static SYSDEV_ATTR(release, S_IWUSR, NULL, cpu_release_store); +static SYSDEV_CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); +static SYSDEV_CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store); #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ #else /* ... !CONFIG_HOTPLUG_CPU */ -- cgit v1.2.3 From 178a5b35b2777346206d4b577b36d10061732f8c Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Fri, 12 Mar 2010 07:30:35 -0500 Subject: kobject: documentation: Update to refer to kset-example.c. Signed-off-by: Robert P. J. Day Signed-off-by: Greg Kroah-Hartman --- Documentation/kobject.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Documentation/kobject.txt b/Documentation/kobject.txt index 668cb83d9561..3ab2472509cb 100644 --- a/Documentation/kobject.txt +++ b/Documentation/kobject.txt @@ -406,4 +406,5 @@ called, and the objects in the former circle release each other. Example code to copy from For a more complete example of using ksets and kobjects properly, see the -sample/kobject/kset-example.c code. +example programs samples/kobject/{kobject-example.c,kset-example.c}, +which will be built as loadable modules if you select CONFIG_SAMPLE_KOBJECT. -- cgit v1.2.3 From f0eae0ed3b7d4182a6b4dd03540a738518ea3163 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 11 Mar 2010 18:11:45 +0200 Subject: driver-core: document ERR_PTR() return values A number of functions in the driver core return ERR_PTR() values on error. Document this in the kernel-doc of the functions. Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman --- drivers/base/class.c | 2 ++ drivers/base/core.c | 6 ++++++ drivers/base/platform.c | 6 ++++++ 3 files changed, 14 insertions(+) diff --git a/drivers/base/class.c b/drivers/base/class.c index 0147f476b8a9..9c6a0d6408e7 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -219,6 +219,8 @@ static void class_create_release(struct class *cls) * This is used to create a struct class pointer that can then be used * in calls to device_create(). 
* + * Returns &struct class pointer on success, or ERR_PTR() on error. + * * Note, the pointer created here is to be destroyed when finished by * making a call to class_destroy(). */ diff --git a/drivers/base/core.c b/drivers/base/core.c index ef55df34ddd0..b56a0ba31d4a 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1345,6 +1345,8 @@ static void root_device_release(struct device *dev) * 'module' symlink which points to the @owner directory * in sysfs. * + * Returns &struct device pointer on success, or ERR_PTR() on error. + * * Note: You probably want to use root_device_register(). */ struct device *__root_device_register(const char *name, struct module *owner) @@ -1432,6 +1434,8 @@ static void device_create_release(struct device *dev) * Any further sysfs files that might be required can be created using this * pointer. * + * Returns &struct device pointer on success, or ERR_PTR() on error. + * * Note: the struct class passed to this function must have previously * been created with a call to class_create(). */ @@ -1492,6 +1496,8 @@ EXPORT_SYMBOL_GPL(device_create_vargs); * Any further sysfs files that might be required can be created using this * pointer. * + * Returns &struct device pointer on success, or ERR_PTR() on error. + * * Note: the struct class passed to this function must have previously * been created with a call to class_create(). */ diff --git a/drivers/base/platform.c b/drivers/base/platform.c index f6bcf22e6bd1..4b4b565c835f 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -362,6 +362,8 @@ EXPORT_SYMBOL_GPL(platform_device_unregister); * enumeration tasks, they don't fully conform to the Linux driver model. * In particular, when such drivers are built as modules, they can't be * "hotplugged". + * + * Returns &struct platform_device pointer on success, or ERR_PTR() on error. */ struct platform_device *platform_device_register_simple(const char *name, int id, @@ -408,6 +410,8 @@ EXPORT_SYMBOL_GPL(platform_device_register_simple); * allocated for the device allows drivers using such devices to be * unloaded without waiting for the last reference to the device to be * dropped. + * + * Returns &struct platform_device pointer on success, or ERR_PTR() on error. */ struct platform_device *platform_device_register_data( struct device *parent, @@ -559,6 +563,8 @@ EXPORT_SYMBOL_GPL(platform_driver_probe); * * Use this in legacy-style modules that probe hardware directly and * register a single platform device and corresponding platform driver. + * + * Returns &struct platform_device pointer on success, or ERR_PTR() on error. */ struct platform_device * __init_or_module platform_create_bundle( struct platform_driver *driver, -- cgit v1.2.3 From 12ee3c0a0ac42bed0939420468fd35f5cdceae4f Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Wed, 10 Mar 2010 14:50:21 -0800 Subject: driver core: numa: fix BUILD_BUG_ON for node_read_distance node_read_distance() has a BUILD_BUG_ON() to prevent buffer overruns when the number of nodes printed will exceed the buffer length. Each node only needs four chars: three for distance (maximum distance is 255) and one for a seperating space or a trailing newline. 
Signed-off-by: David Rientjes Cc: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- drivers/base/node.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/base/node.c b/drivers/base/node.c index ad43185ec15a..93b3ac65c2d4 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -165,8 +165,11 @@ static ssize_t node_read_distance(struct sys_device * dev, int len = 0; int i; - /* buf currently PAGE_SIZE, need ~4 chars per node */ - BUILD_BUG_ON(MAX_NUMNODES*4 > PAGE_SIZE/2); + /* + * buf is currently PAGE_SIZE in length and each node needs 4 chars + * at the most (distance + space or newline). + */ + BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE); for_each_online_node(i) len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i)); -- cgit v1.2.3 From 87a6aca504d65f242589583e04df5e74b5eae1fe Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Mon, 15 Mar 2010 17:14:15 -0700 Subject: Revert "tty: Add a new VT mode which is like VT_PROCESS but doesn't require a VT_RELDISP ioctl call" This reverts commit eec9fe7d1ab4a0dfac4cb43047a7657fffd0002f. Ari writes as the reason this should be reverted: The problems with this patch include: 1. There's at least one subtlety I overlooked - switching between X servers (i.e. from one X VT to another) still requires the cooperation of both X servers. I was assuming that KMS eliminated this. 2. It hasn't been tested at all (no X server patch exists which uses the new mode). As he was the original author of the patch, I'll revert it. Cc: Ari Entlich Cc: Alan Cox Signed-off-by: Greg Kroah-Hartman --- drivers/char/vt_ioctl.c | 39 +++++++++++++++++++-------------------- include/linux/vt.h | 3 +-- 2 files changed, 20 insertions(+), 22 deletions(-) diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 87778dcf8727..6aa10284104a 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -888,7 +888,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, ret = -EFAULT; goto out; } - if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS && tmp.mode != VT_PROCESS_AUTO) { + if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) { ret = -EINVAL; goto out; } @@ -1622,7 +1622,7 @@ static void complete_change_console(struct vc_data *vc) * telling it that it has acquired. Also check if it has died and * clean up (similar to logic employed in change_console()) */ - if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) { + if (vc->vt_mode.mode == VT_PROCESS) { /* * Send the signal as privileged - kill_pid() will * tell us if the process has gone or something else @@ -1682,7 +1682,7 @@ void change_console(struct vc_data *new_vc) * vt to auto control. */ vc = vc_cons[fg_console].d; - if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) { + if (vc->vt_mode.mode == VT_PROCESS) { /* * Send the signal as privileged - kill_pid() will * tell us if the process has gone or something else @@ -1693,28 +1693,27 @@ void change_console(struct vc_data *new_vc) */ vc->vt_newvt = new_vc->vc_num; if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) { - if(vc->vt_mode.mode == VT_PROCESS) - /* - * It worked. Mark the vt to switch to and - * return. The process needs to send us a - * VT_RELDISP ioctl to complete the switch. - */ - return; - } else { /* - * The controlling process has died, so we revert back to - * normal operation. In this case, we'll also change back - * to KD_TEXT mode. 
I'm not sure if this is strictly correct - * but it saves the agony when the X server dies and the screen - * remains blanked due to KD_GRAPHICS! It would be nice to do - * this outside of VT_PROCESS but there is no single process - * to account for and tracking tty count may be undesirable. + * It worked. Mark the vt to switch to and + * return. The process needs to send us a + * VT_RELDISP ioctl to complete the switch. */ - reset_vc(vc); + return; } /* - * Fall through to normal (VT_AUTO and VT_PROCESS_AUTO) handling of the switch... + * The controlling process has died, so we revert back to + * normal operation. In this case, we'll also change back + * to KD_TEXT mode. I'm not sure if this is strictly correct + * but it saves the agony when the X server dies and the screen + * remains blanked due to KD_GRAPHICS! It would be nice to do + * this outside of VT_PROCESS but there is no single process + * to account for and tracking tty count may be undesirable. + */ + reset_vc(vc); + + /* + * Fall through to normal (VT_AUTO) handling of the switch... */ } diff --git a/include/linux/vt.h b/include/linux/vt.h index 778b7b2a47d4..d5dd0bc408fd 100644 --- a/include/linux/vt.h +++ b/include/linux/vt.h @@ -27,7 +27,7 @@ struct vt_mode { #define VT_SETMODE 0x5602 /* set mode of active vt */ #define VT_AUTO 0x00 /* auto vt switching */ #define VT_PROCESS 0x01 /* process controls switching */ -#define VT_PROCESS_AUTO 0x02 /* process is notified of switching */ +#define VT_ACKACQ 0x02 /* acknowledge switch */ struct vt_stat { unsigned short v_active; /* active vt */ @@ -38,7 +38,6 @@ struct vt_stat { #define VT_SENDSIG 0x5604 /* signal to send to bitmask of vts */ #define VT_RELDISP 0x5605 /* release display */ -#define VT_ACKACQ 0x02 /* acknowledge switch */ #define VT_ACTIVATE 0x5606 /* make vt active */ #define VT_WAITACTIVE 0x5607 /* wait for vt active */ -- cgit v1.2.3 From 352fa6ad16b89f8ffd1a93b4419b1a8f2259feab Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 2 Mar 2010 22:24:19 +0000 Subject: tty: Take a 256 byte padding into account when buffering below sub-page units The TTY layer takes some care to ensure that only sub-page allocations are made with interrupts disabled. It does this by setting a goal of "TTY_BUFFER_PAGE" to allocate. Unfortunately, while TTY_BUFFER_PAGE takes the size of tty_buffer into account, it fails to account that tty_buffer_find() rounds the buffer size out to the next 256 byte boundary before adding on the size of the tty_buffer. This patch adjusts the TTY_BUFFER_PAGE calculation to take into account the size of the tty_buffer and the padding. Once applied, tty_buffer_alloc() should not require high-order allocations. Signed-off-by: Mel Gorman Cc: stable Signed-off-by: Greg Kroah-Hartman --- include/linux/tty.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/include/linux/tty.h b/include/linux/tty.h index 568369a86306..593228a520e1 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -70,12 +70,13 @@ struct tty_buffer { /* * We default to dicing tty buffer allocations to this many characters - * in order to avoid multiple page allocations. We assume tty_buffer itself - * is under 256 bytes. See tty_buffer_find for the allocation logic this - * must match + * in order to avoid multiple page allocations. We know the size of + * tty_buffer itself but it must also be taken into account that the + * the buffer is 256 byte aligned. 
See tty_buffer_find for the allocation + * logic this must match */ -#define TTY_BUFFER_PAGE ((PAGE_SIZE - 256) / 2) +#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF) struct tty_bufhead { -- cgit v1.2.3 From f157b58511e56d418eb582de96fedc4ea03d8061 Mon Sep 17 00:00:00 2001 From: David Miller Date: Wed, 3 Mar 2010 02:50:26 -0800 Subject: uartlite: Fix build on sparc. We can get this driver enabled via MFD_TIMBERDALE which only requires GPIO to be on. But the of_address_to_resource() function is only present on powerpc and microblaze, so we have to conditionalize the CONFIG_OF probing bits on that. Signed-off-by: David S. Miller Acked-by: Grant Likely Signed-off-by: Greg Kroah-Hartman --- drivers/serial/uartlite.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c index ab2ab3c81834..f0a6c61b17f7 100644 --- a/drivers/serial/uartlite.c +++ b/drivers/serial/uartlite.c @@ -19,7 +19,7 @@ #include #include #include -#if defined(CONFIG_OF) +#if defined(CONFIG_OF) && (defined(CONFIG_PPC32) || defined(CONFIG_MICROBLAZE)) #include #include #include @@ -581,7 +581,7 @@ static struct platform_driver ulite_platform_driver = { /* --------------------------------------------------------------------- * OF bus bindings */ -#if defined(CONFIG_OF) +#if defined(CONFIG_OF) && (defined(CONFIG_PPC32) || defined(CONFIG_MICROBLAZE)) static int __devinit ulite_of_probe(struct of_device *op, const struct of_device_id *match) { @@ -631,11 +631,11 @@ static inline void __exit ulite_of_unregister(void) { of_unregister_platform_driver(&ulite_of_driver); } -#else /* CONFIG_OF */ -/* CONFIG_OF not enabled; do nothing helpers */ +#else /* CONFIG_OF && (CONFIG_PPC32 || CONFIG_MICROBLAZE) */ +/* Appropriate config not enabled; do nothing helpers */ static inline int __init ulite_of_register(void) { return 0; } static inline void __exit ulite_of_unregister(void) { } -#endif /* CONFIG_OF */ +#endif /* CONFIG_OF && (CONFIG_PPC32 || CONFIG_MICROBLAZE) */ /* --------------------------------------------------------------------- * Module setup/teardown -- cgit v1.2.3 From e74d098c66543d0731de62eb747ccd5b636a6f4c Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Fri, 12 Mar 2010 11:53:15 +0530 Subject: hvc_console: Fix race between hvc_close and hvc_remove Alan pointed out a race in the code where hvc_remove is invoked. The recent virtio_console work is the first user of hvc_remove(). Alan describes it thus: The hvc_console assumes that a close and remove call can't occur at the same time. In addition tty_hangup(tty) is problematic as tty_hangup is asynchronous itself.... So this can happen hvc_close hvc_remove hung up ? - no lock tty = hp->tty unlock lock hp->tty = NULL unlock notify del kref_put the hvc struct close completes tty is destroyed tty_hangup dead tty tty->ops will be NULL NULL->... This patch adds some tty krefs and also converts to using tty_vhangup(). 
Reported-by: Alan Cox Signed-off-by: Amit Shah CC: Alan Cox CC: linuxppc-dev@ozlabs.org CC: Rusty Russell Signed-off-by: Greg Kroah-Hartman --- drivers/char/hvc_console.c | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index 465185fc0f52..ba55bba151b9 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c @@ -312,6 +312,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) spin_lock_irqsave(&hp->lock, flags); /* Check and then increment for fast path open. */ if (hp->count++ > 0) { + tty_kref_get(tty); spin_unlock_irqrestore(&hp->lock, flags); hvc_kick(); return 0; @@ -319,7 +320,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) tty->driver_data = hp; - hp->tty = tty; + hp->tty = tty_kref_get(tty); spin_unlock_irqrestore(&hp->lock, flags); @@ -336,6 +337,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) spin_lock_irqsave(&hp->lock, flags); hp->tty = NULL; spin_unlock_irqrestore(&hp->lock, flags); + tty_kref_put(tty); tty->driver_data = NULL; kref_put(&hp->kref, destroy_hvc_struct); printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc); @@ -363,13 +365,18 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) return; hp = tty->driver_data; + spin_lock_irqsave(&hp->lock, flags); + tty_kref_get(tty); if (--hp->count == 0) { /* We are done with the tty pointer now. */ hp->tty = NULL; spin_unlock_irqrestore(&hp->lock, flags); + /* Put the ref obtained in hvc_open() */ + tty_kref_put(tty); + if (hp->ops->notifier_del) hp->ops->notifier_del(hp, hp->data); @@ -389,6 +396,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp) spin_unlock_irqrestore(&hp->lock, flags); } + tty_kref_put(tty); kref_put(&hp->kref, destroy_hvc_struct); } @@ -424,10 +432,11 @@ static void hvc_hangup(struct tty_struct *tty) spin_unlock_irqrestore(&hp->lock, flags); if (hp->ops->notifier_hangup) - hp->ops->notifier_hangup(hp, hp->data); + hp->ops->notifier_hangup(hp, hp->data); while(temp_open_count) { --temp_open_count; + tty_kref_put(tty); kref_put(&hp->kref, destroy_hvc_struct); } } @@ -592,7 +601,7 @@ int hvc_poll(struct hvc_struct *hp) } /* No tty attached, just skip */ - tty = hp->tty; + tty = tty_kref_get(hp->tty); if (tty == NULL) goto bail; @@ -672,6 +681,8 @@ int hvc_poll(struct hvc_struct *hp) tty_flip_buffer_push(tty); } + if (tty) + tty_kref_put(tty); return poll_mask; } @@ -807,7 +818,7 @@ int hvc_remove(struct hvc_struct *hp) struct tty_struct *tty; spin_lock_irqsave(&hp->lock, flags); - tty = hp->tty; + tty = tty_kref_get(hp->tty); if (hp->index < MAX_NR_HVC_CONSOLES) vtermnos[hp->index] = -1; @@ -819,18 +830,18 @@ int hvc_remove(struct hvc_struct *hp) /* * We 'put' the instance that was grabbed when the kref instance * was initialized using kref_init(). Let the last holder of this - * kref cause it to be removed, which will probably be the tty_hangup + * kref cause it to be removed, which will probably be the tty_vhangup * below. */ kref_put(&hp->kref, destroy_hvc_struct); /* - * This function call will auto chain call hvc_hangup. The tty should - * always be valid at this time unless a simultaneous tty close already - * cleaned up the hvc_struct. + * This function call will auto chain call hvc_hangup. 
*/ - if (tty) - tty_hangup(tty); + if (tty) { + tty_vhangup(tty); + tty_kref_put(tty); + } return 0; } EXPORT_SYMBOL_GPL(hvc_remove); -- cgit v1.2.3 From d4bee0a677cfa5a32f964ffa420e27406c65e605 Mon Sep 17 00:00:00 2001 From: Fang Wenqi Date: Tue, 9 Mar 2010 18:54:28 +0800 Subject: tty_buffer: Fix distinct type warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CC drivers/char/tty_buffer.o drivers/char/tty_buffer.c: In function ‘tty_insert_flip_string_fixed_flag’: drivers/char/tty_buffer.c:251: warning: comparison of distinct pointer types lacks a cast drivers/char/tty_buffer.c: In function ‘tty_insert_flip_string_flags’: drivers/char/tty_buffer.c:288: warning: comparison of distinct pointer types lacks a cast Fix it by replacing min() with min_t() in tty_insert_flip_string_flags and tty_insert_flip_string_fixed_flag(). Signed-off-by: Fang Wenqi Signed-off-by: Greg Kroah-Hartman --- drivers/char/tty_buffer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c index af8d97715728..7ee52164d474 100644 --- a/drivers/char/tty_buffer.c +++ b/drivers/char/tty_buffer.c @@ -248,7 +248,7 @@ int tty_insert_flip_string_fixed_flag(struct tty_struct *tty, { int copied = 0; do { - int goal = min(size - copied, TTY_BUFFER_PAGE); + int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE); int space = tty_buffer_request_room(tty, goal); struct tty_buffer *tb = tty->buf.tail; /* If there is no space then tb may be NULL */ @@ -285,7 +285,7 @@ int tty_insert_flip_string_flags(struct tty_struct *tty, { int copied = 0; do { - int goal = min(size - copied, TTY_BUFFER_PAGE); + int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE); int space = tty_buffer_request_room(tty, goal); struct tty_buffer *tb = tty->buf.tail; /* If there is no space then tb may be NULL */ -- cgit v1.2.3 From 231443665882a02214c3748b9f86615a3ce9e5c2 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Thu, 11 Mar 2010 14:08:18 -0800 Subject: tty: cpm_uart: use resource_size() Use the resource_size function instead of manually calculating the resource size. This reduces the chance of introducing off-by-one errors. Signed-off-by: Tobias Klauser Cc: Alan Cox Cc: Kumar Gala Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- drivers/serial/cpm_uart/cpm_uart_cpm2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/serial/cpm_uart/cpm_uart_cpm2.c index a9802e76b5fa..722eac18f382 100644 --- a/drivers/serial/cpm_uart/cpm_uart_cpm2.c +++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.c @@ -61,7 +61,7 @@ void __iomem *cpm_uart_map_pram(struct uart_cpm_port *port, void __iomem *pram; unsigned long offset; struct resource res; - unsigned long len; + resource_size_t len; /* Don't remap parameter RAM if it has already been initialized * during console setup. 
@@ -74,7 +74,7 @@ void __iomem *cpm_uart_map_pram(struct uart_cpm_port *port, if (of_address_to_resource(np, 1, &res)) return NULL; - len = 1 + res.end - res.start; + len = resource_size(&res); pram = ioremap(res.start, len); if (!pram) return NULL; -- cgit v1.2.3 From 336cee42dd52824e360ab419eab4e8888eb054ec Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Mon, 8 Mar 2010 21:50:11 -0600 Subject: tty_port,usb-console: Fix usb serial console open/close regression Commit e1108a63e10d344284011cccc06328b2cd3e5da3 ("usb_serial: Use the shutdown() operation") breaks the ability to use a usb console starting in 2.6.33. This was observed when using console=ttyUSB0,115200 as a boot argument with an FTDI device. The error is: ftdi_sio ttyUSB0: ftdi_submit_read_urb - failed submitting read urb, error -22 The handling of the ASYNCB_INITIALIZED changed in 2.6.32 such that in tty_port_shutdown() it always clears the flag if it is set. The fix is to add a variable to the tty_port struct to indicate when the tty port is a console. CC: Alan Cox CC: Alan Stern CC: Oliver Neukum CC: Andrew Morton Signed-off-by: Jason Wessel Signed-off-by: Greg Kroah-Hartman --- drivers/char/tty_port.c | 2 +- drivers/usb/serial/console.c | 1 + include/linux/tty.h | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c index be492dd66437..a3bd1d0b66cf 100644 --- a/drivers/char/tty_port.c +++ b/drivers/char/tty_port.c @@ -119,7 +119,7 @@ EXPORT_SYMBOL(tty_port_tty_set); static void tty_port_shutdown(struct tty_port *port) { mutex_lock(&port->mutex); - if (port->ops->shutdown && + if (port->ops->shutdown && !port->console && test_and_clear_bit(ASYNCB_INITIALIZED, &port->flags)) port->ops->shutdown(port); mutex_unlock(&port->mutex); diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index b22ac3258523..f347da2ef00a 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c @@ -181,6 +181,7 @@ static int usb_console_setup(struct console *co, char *options) /* The console is special in terms of closing the device so * indicate this port is now acting as a system console. */ port->console = 1; + port->port.console = 1; mutex_unlock(&serial->disc_mutex); return retval; diff --git a/include/linux/tty.h b/include/linux/tty.h index 593228a520e1..4409967db0c4 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -224,6 +224,7 @@ struct tty_port { wait_queue_head_t close_wait; /* Close waiters */ wait_queue_head_t delta_msr_wait; /* Modem status change */ unsigned long flags; /* TTY flags ASY_*/ + unsigned char console:1; /* port is a console */ struct mutex mutex; /* Locking */ struct mutex buf_mutex; /* Buffer alloc lock */ unsigned char *xmit_buf; /* Optional buffer */ -- cgit v1.2.3 From 7152b592593b9d48b33f8997b1dfd6df9143f7ec Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Sat, 6 Mar 2010 15:04:03 -0500 Subject: USB: fix usbfs regression This patch (as1352) fixes a bug in the way isochronous input data is returned to userspace for usbfs transfers. The entire buffer must be copied, not just the first actual_length bytes, because the individual packets will be discontiguous if any of them are short. 
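The buffer layout that makes this necessary can be seen from the isochronous descriptors themselves: packet i always begins at iso_frame_desc[i].offset, so short packets leave gaps rather than compacting the data. A small sketch that walks the buffer (hypothetical helper, illustrative only, not part of the patch):

    /*
     * Illustrative sketch -- hypothetical helper, not from the patch.
     * Packet i of an isochronous URB always starts at
     * urb->iso_frame_desc[i].offset; when an earlier packet is short,
     * the bytes between its actual_length and the next packet's offset
     * are a gap, so the received data is not one contiguous run and
     * urb->actual_length does not bound it.
     */
    #include <linux/kernel.h>
    #include <linux/usb.h>

    static void dump_iso_layout(const struct urb *urb)
    {
            int i;

            for (i = 0; i < urb->number_of_packets; i++) {
                    const struct usb_iso_packet_descriptor *d =
                                            &urb->iso_frame_desc[i];

                    pr_info("packet %d: %u of %u bytes at offset %u, status %d\n",
                            i, d->actual_length, d->length, d->offset,
                            d->status);
            }
    }

Because of those gaps, copying only the first actual_length bytes would truncate later packets, and without the added memset() the unused holes could expose uninitialized kernel memory to userspace.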
Reported-by: Markus Rechberger Signed-off-by: Alan Stern CC: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/devio.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index e909ff7b9094..3466fdc5bb11 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1207,6 +1207,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, free_async(as); return -ENOMEM; } + /* Isochronous input data may end up being discontiguous + * if some of the packets are short. Clear the buffer so + * that the gaps don't leak kernel data to userspace. + */ + if (is_in && uurb->type == USBDEVFS_URB_TYPE_ISO) + memset(as->urb->transfer_buffer, 0, + uurb->buffer_length); } as->urb->dev = ps->dev; as->urb->pipe = (uurb->type << 30) | @@ -1345,10 +1352,14 @@ static int processcompl(struct async *as, void __user * __user *arg) void __user *addr = as->userurb; unsigned int i; - if (as->userbuffer && urb->actual_length) - if (copy_to_user(as->userbuffer, urb->transfer_buffer, - urb->actual_length)) + if (as->userbuffer && urb->actual_length) { + if (urb->number_of_packets > 0) /* Isochronous */ + i = urb->transfer_buffer_length; + else /* Non-Isoc */ + i = urb->actual_length; + if (copy_to_user(as->userbuffer, urb->transfer_buffer, i)) goto err_out; + } if (put_user(as->status, &userurb->status)) goto err_out; if (put_user(urb->actual_length, &userurb->actual_length)) -- cgit v1.2.3 From 0ae1474367a15e1b65a9deed3a73a14475a419fc Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Sat, 27 Feb 2010 14:05:46 +0100 Subject: USB: serial: fix error message on close in generic driver Resubmitting read urb fails with -EPERM if completion handler runs while urb is being killed on close. This should not be reported as an error. Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/generic.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 89fac36684c5..e560d1d7f628 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -415,11 +415,13 @@ void usb_serial_generic_resubmit_read_urb(struct usb_serial_port *port, ((serial->type->read_bulk_callback) ? serial->type->read_bulk_callback : usb_serial_generic_read_bulk_callback), port); + result = usb_submit_urb(urb, mem_flags); - if (result) + if (result && result != -EPERM) { dev_err(&port->dev, "%s - failed resubmitting read urb, error %d\n", __func__, result); + } } EXPORT_SYMBOL_GPL(usb_serial_generic_resubmit_read_urb); -- cgit v1.2.3 From 6313620228624ff4dcb78b1dbd459d0c208df126 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Sat, 27 Feb 2010 14:06:07 +0100 Subject: USB: serial: fix softint not being called on errors Make sure usb_serial_port_softint is called on errors also when using multi urb writes. 
Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/generic.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index e560d1d7f628..214bf25bc3b5 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -500,23 +500,18 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb) if (port->urbs_in_flight < 0) port->urbs_in_flight = 0; spin_unlock_irqrestore(&port->lock, flags); - - if (status) { - dbg("%s - nonzero multi-urb write bulk status " - "received: %d", __func__, status); - return; - } } else { port->write_urb_busy = 0; - if (status) { - dbg("%s - nonzero multi-urb write bulk status " - "received: %d", __func__, status); + if (status) kfifo_reset_out(&port->write_fifo); - } else + else usb_serial_generic_write_start(port); } + if (status) + dbg("%s - non-zero urb status: %d", __func__, status); + usb_serial_port_softint(port); } EXPORT_SYMBOL_GPL(usb_serial_generic_write_bulk_callback); -- cgit v1.2.3 From eb8878a881c306ff3eab6e741ab8fc94096f4e1a Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Sat, 27 Feb 2010 16:24:49 +0100 Subject: USB: serial: use port endpoint size to determine if ep is available It is possible to have a multi-port device with a port lacking an in or out bulk endpoint. Only checking for num_bulk_in or num_bulk_out is thus not sufficient to determine whether a specific port has an in or out bulk endpoint. This fixes potential null pointer dereferences in the generic open and write routines, as well as access to uninitialised fifo in write_room and chars_in_buffer. Also let write fail with ENODEV (instead of 0) on missing out endpoint (also on zero-length writes). Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/generic.c | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 214bf25bc3b5..f804acb138ec 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -130,7 +130,7 @@ int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port spin_unlock_irqrestore(&port->lock, flags); /* if we have a bulk endpoint, start reading from it */ - if (serial->num_bulk_in) { + if (port->bulk_in_size) { /* Start reading from the device */ usb_fill_bulk_urb(port->read_urb, serial->dev, usb_rcvbulkpipe(serial->dev, @@ -159,10 +159,10 @@ static void generic_cleanup(struct usb_serial_port *port) dbg("%s - port %d", __func__, port->number); if (serial->dev) { - /* shutdown any bulk reads that might be going on */ - if (serial->num_bulk_out) + /* shutdown any bulk transfers that might be going on */ + if (port->bulk_out_size) usb_kill_urb(port->write_urb); - if (serial->num_bulk_in) + if (port->bulk_in_size) usb_kill_urb(port->read_urb); } } @@ -333,15 +333,15 @@ int usb_serial_generic_write(struct tty_struct *tty, dbg("%s - port %d", __func__, port->number); + /* only do something if we have a bulk out endpoint */ + if (!port->bulk_out_size) + return -ENODEV; + if (count == 0) { dbg("%s - write request of 0 bytes", __func__); return 0; } - /* only do something if we have a bulk out endpoint */ - if (!serial->num_bulk_out) - return 0; - if (serial->type->max_in_flight_urbs) return usb_serial_multi_urb_write(tty, port, buf, count); @@ -364,14 +364,19 @@ int usb_serial_generic_write_room(struct tty_struct *tty) int room = 0; dbg("%s - port %d", 
__func__, port->number); + + if (!port->bulk_out_size) + return 0; + spin_lock_irqsave(&port->lock, flags); if (serial->type->max_in_flight_urbs) { if (port->urbs_in_flight < serial->type->max_in_flight_urbs) room = port->bulk_out_size * (serial->type->max_in_flight_urbs - port->urbs_in_flight); - } else if (serial->num_bulk_out) + } else { room = kfifo_avail(&port->write_fifo); + } spin_unlock_irqrestore(&port->lock, flags); dbg("%s - returns %d", __func__, room); @@ -382,15 +387,18 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_serial *serial = port->serial; - int chars = 0; unsigned long flags; + int chars; dbg("%s - port %d", __func__, port->number); + if (!port->bulk_out_size) + return 0; + spin_lock_irqsave(&port->lock, flags); if (serial->type->max_in_flight_urbs) chars = port->tx_bytes_flight; - else if (serial->num_bulk_out) + else chars = kfifo_len(&port->write_fifo); spin_unlock_irqrestore(&port->lock, flags); -- cgit v1.2.3 From cd0e8aa1f4d36ece677b8ecf270ba921843dc6ca Mon Sep 17 00:00:00 2001 From: Ondrej Zary Date: Sat, 27 Feb 2010 22:56:28 +0100 Subject: USB: unusual_devs.h: Fix capacity for SL11R-IDE 2.6c SL11R-IDE 2.6c (at least) reports wrong capacity (one sector more). Reading that last sector causes the device not to work anymore (and looks like HAL or something does that automatically after plugging in): sd 5:0:0:0: [sdc] Device not ready sd 5:0:0:0: [sdc] Result: hostbyte=0x00 driverbyte=0x08 sd 5:0:0:0: [sdc] Sense Key : 0x2 [current] sd 5:0:0:0: [sdc] ASC=0x0 ASCQ=0x0 sd 5:0:0:0: [sdc] CDB: cdb[0]=0x28: 28 00 04 a8 b5 70 00 00 01 00 Add unusual_devs entry to fix the capacity. Signed-off-by: Ondrej Zary Signed-off-by: Phil Dibowitz Signed-off-by: Greg Kroah-Hartman --- drivers/usb/storage/unusual_devs.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 98b549b1cab2..61c8b9df96e6 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -374,6 +374,15 @@ UNUSUAL_DEV( 0x04ce, 0x0002, 0x0074, 0x0074, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_FIX_INQUIRY), +/* Reported by Ondrej Zary + * The device reports one sector more and breaks when that sector is accessed + */ +UNUSUAL_DEV( 0x04ce, 0x0002, 0x026c, 0x026c, + "ScanLogic", + "SL11R-IDE", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_FIX_CAPACITY), + /* Reported by Kriston Fincher * Patch submitted by Sean Millichamp * This is to support the Panasonic PalmCam PV-SD4090 -- cgit v1.2.3 From bf162019b7f5bda9eb3241ae22de831df2126132 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Sun, 28 Feb 2010 13:51:29 +0800 Subject: USB: Option: Add support for a variant of DLink DWM 652 U5 I found a DLink DWM 652 U5 USB 3G modem has product ID 0xce1e instead of orignal 0xce16. The new ID is added. And I found there are two entries for 0xce16, one has raw number, the other has symbol DLINK_PRODUCT_DWM_652_U5. This is fixed too. 
Signed-off-by: Huang Ying Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 847b805d63a3..3ab1a0440d81 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -309,6 +309,7 @@ static int option_resume(struct usb_serial *serial); #define DLINK_VENDOR_ID 0x1186 #define DLINK_PRODUCT_DWM_652 0x3e04 #define DLINK_PRODUCT_DWM_652_U5 0xce16 +#define DLINK_PRODUCT_DWM_652_U5A 0xce1e #define QISDA_VENDOR_ID 0x1da5 #define QISDA_PRODUCT_H21_4512 0x4512 @@ -659,6 +660,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ + { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5A) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) }, { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) }, @@ -666,7 +668,6 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) }, { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, - { USB_DEVICE(ALINK_VENDOR_ID, 0xce16) }, { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, -- cgit v1.2.3 From 92bc3648e6027384479852b770a542722fadee7c Mon Sep 17 00:00:00 2001 From: Clemens Ladisch Date: Mon, 1 Mar 2010 09:12:50 +0100 Subject: USB: EHCI: fix ITD list order When isochronous URBs are shorter than one frame and when more than one ITD in a frame has been completed before the interrupt can be handled, scan_periodic() completes the URBs in the order in which they are found in the descriptor list. Therefore, the descriptor list must contain the ITDs in the correct order, i.e., a new ITD must be linked in after any previous ITDs of the same endpoint. This should fix garbled capture data in the USB audio drivers. Signed-off-by: Clemens Ladisch Reported-by: Colin Fletcher Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/ehci-sched.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 39340ae00ac4..cd1e8bf5327e 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1565,13 +1565,27 @@ itd_patch( static inline void itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd) { - /* always prepend ITD/SITD ... 
only QH tree is order-sensitive */ - itd->itd_next = ehci->pshadow [frame]; - itd->hw_next = ehci->periodic [frame]; - ehci->pshadow [frame].itd = itd; + union ehci_shadow *prev = &ehci->pshadow[frame]; + __hc32 *hw_p = &ehci->periodic[frame]; + union ehci_shadow here = *prev; + __hc32 type = 0; + + /* skip any iso nodes which might belong to previous microframes */ + while (here.ptr) { + type = Q_NEXT_TYPE(ehci, *hw_p); + if (type == cpu_to_hc32(ehci, Q_TYPE_QH)) + break; + prev = periodic_next_shadow(ehci, prev, type); + hw_p = shadow_next_periodic(ehci, &here, type); + here = *prev; + } + + itd->itd_next = here; + itd->hw_next = *hw_p; + prev->itd = itd; itd->frame = frame; wmb (); - ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD); + *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD); } /* fit urb's itds into the selected schedule slot; activate as needed */ -- cgit v1.2.3 From 1082f57abfa26590b60c43f503afb24102a37016 Mon Sep 17 00:00:00 2001 From: Clemens Ladisch Date: Mon, 1 Mar 2010 17:18:56 +0100 Subject: USB: EHCI: adjust ehci_iso_stream for changes in ehci_qh The EHCI driver stores in usb_host_endpoint.hcpriv a pointer to either an ehci_qh or an ehci_iso_stream structure, and uses the contents of the hw_info1 field to distinguish the two cases. After ehci_qh was split into hw and sw parts, ehci_iso_stream must also be adjusted so that it again looks like an ehci_qh structure. This fixes a NULL pointer access in ehci_endpoint_disable() when it tries to access qh->hw->hw_info1. Signed-off-by: Clemens Ladisch Reported-by: Colin Fletcher Cc: stable Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/ehci-hcd.c | 2 +- drivers/usb/host/ehci-sched.c | 4 ++-- drivers/usb/host/ehci.h | 5 ++--- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index d8d6d3461d32..dc55a62859c6 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -995,7 +995,7 @@ rescan: /* endpoints can be iso streams. for now, we don't * accelerate iso completions ... so spin a while. */ - if (qh->hw->hw_info1 == 0) { + if (qh->hw == NULL) { ehci_vdbg (ehci, "iso delay\n"); goto idle_timeout; } diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index cd1e8bf5327e..a0aaaaff2560 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1123,8 +1123,8 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb) urb->interval); } - /* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */ - } else if (unlikely (stream->hw_info1 != 0)) { + /* if dev->ep [epnum] is a QH, hw is set */ + } else if (unlikely (stream->hw != NULL)) { ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n", urb->dev->devpath, epnum, usb_pipein(urb->pipe) ? "in" : "out"); diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index 2d85e21ff282..b1dce96dd621 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -394,9 +394,8 @@ struct ehci_iso_sched { * acts like a qh would, if EHCI had them for ISO. 
*/ struct ehci_iso_stream { - /* first two fields match QH, but info1 == 0 */ - __hc32 hw_next; - __hc32 hw_info1; + /* first field matches ehci_hq, but is NULL */ + struct ehci_qh_hw *hw; u32 refcount; u8 bEndpointAddress; -- cgit v1.2.3 From f0730924e9e32bb8935c60040a26d94179355088 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Wed, 3 Mar 2010 00:37:56 +0100 Subject: USB: cdc-acm: Fix stupid NULL pointer in resume() Stupid logic bug passing a just nulled pointer Signed-off-by: Oliver Neukum Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/cdc-acm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 975d556b4787..be6331e2c276 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1441,7 +1441,7 @@ static int acm_resume(struct usb_interface *intf) wb = acm->delayed_wb; acm->delayed_wb = NULL; spin_unlock_irq(&acm->write_lock); - acm_start_wb(acm, acm->delayed_wb); + acm_start_wb(acm, wb); } else { spin_unlock_irq(&acm->write_lock); } -- cgit v1.2.3 From 0725e95ea56698774e893edb7e7276b1d6890954 Mon Sep 17 00:00:00 2001 From: Bernhard Rosenkraenzer Date: Wed, 10 Mar 2010 12:36:43 +0100 Subject: USB: qcserial: add new device ids This patch adds various USB device IDs for Gobi 2000 devices, as found in the drivers available at https://www.codeaurora.org/wiki/GOBI_Releases Signed-off-by: Bernhard Rosenkraenzer Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/qcserial.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 310ff6ec6567..53a2d5a935a2 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -47,6 +47,35 @@ static const struct usb_device_id id_table[] = { {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ + {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */ + {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ + {USB_DEVICE(0x05c6, 0x9224)}, /* Sony Gobi 2000 QDL device (N0279, VU730) */ + {USB_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ + {USB_DEVICE(0x05c6, 0x9244)}, /* Samsung Gobi 2000 QDL device (VL176) */ + {USB_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ + {USB_DEVICE(0x03f0, 0x241d)}, /* HP Gobi 2000 QDL device (VP412) */ + {USB_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ + {USB_DEVICE(0x05c6, 0x9214)}, /* Acer Gobi 2000 QDL device (VP413) */ + {USB_DEVICE(0x05c6, 0x9215)}, /* Acer Gobi 2000 Modem device (VP413) */ + {USB_DEVICE(0x05c6, 0x9264)}, /* Asus Gobi 2000 QDL device (VR305) */ + {USB_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */ + {USB_DEVICE(0x05c6, 0x9234)}, /* Top Global Gobi 2000 QDL device (VR306) */ + {USB_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */ + {USB_DEVICE(0x05c6, 0x9274)}, /* iRex Technologies Gobi 2000 QDL device (VR307) */ + {USB_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */ + {USB_DEVICE(0x1199, 0x9000)}, /* Sierra Wireless Gobi 2000 QDL device (VT773) */ + {USB_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ + {USB_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ + {USB_DEVICE(0x1199, 0x9003)}, 
/* Sierra Wireless Gobi 2000 Modem device (VT773) */ + {USB_DEVICE(0x1199, 0x9004)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ + {USB_DEVICE(0x1199, 0x9005)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ + {USB_DEVICE(0x1199, 0x9006)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ + {USB_DEVICE(0x1199, 0x9007)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ + {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ + {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ + {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ + {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */ + {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); -- cgit v1.2.3 From ae926976ac362efc9db2365a07891cc52414f2ec Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Mon, 8 Mar 2010 11:26:01 -0500 Subject: USB: musb: fix build error introduced by isoc change The recent commit "usb: musb: Fix for isochronous IN transfer" (f82a689fa) seems to have been against an older kernel version. It uses the old style naming of variables. Unfortunately, this breaks building for most MUSB users out there since "bDesiredMode" has been renamed to "desired_mode". Signed-off-by: Sonic Zhang Signed-off-by: Mike Frysinger Acked-by: Felipe Balbi Acked-by: Anand Gadiyar Signed-off-by: Greg Kroah-Hartman --- drivers/usb/musb/musb_host.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 3421cf9858b5..dec896e888db 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -1689,7 +1689,7 @@ void musb_host_rx(struct musb *musb, u8 epnum) dma->desired_mode = 1; if (rx_count < hw_ep->max_packet_sz_rx) { length = rx_count; - dma->bDesiredMode = 0; + dma->desired_mode = 0; } else { length = urb->transfer_buffer_length; } -- cgit v1.2.3 From bc75fa3825cdbbdeee3a65d91cc5583bdfe41edf Mon Sep 17 00:00:00 2001 From: Alex Chiang Date: Tue, 16 Mar 2010 14:48:45 -0600 Subject: USB: xhci: rename driver to xhci_hcd Naming consistency with other USB HCDs. 
Signed-off-by: Alex Chiang Cc: Sarah Sharp Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/Makefile | 4 +- drivers/usb/host/xhci-hcd.c | 1916 ------------------------------------------- drivers/usb/host/xhci.c | 1916 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 1918 insertions(+), 1918 deletions(-) delete mode 100644 drivers/usb/host/xhci-hcd.c create mode 100644 drivers/usb/host/xhci.c diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index 4e0c67f1f51b..b6315aa47f7a 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -12,7 +12,7 @@ fhci-objs := fhci-hcd.o fhci-hub.o fhci-q.o fhci-mem.o \ ifeq ($(CONFIG_FHCI_DEBUG),y) fhci-objs += fhci-dbg.o endif -xhci-objs := xhci-hcd.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o +xhci-hcd-objs := xhci.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o obj-$(CONFIG_USB_WHCI_HCD) += whci/ @@ -25,7 +25,7 @@ obj-$(CONFIG_USB_ISP1362_HCD) += isp1362-hcd.o obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o obj-$(CONFIG_USB_FHCI_HCD) += fhci.o -obj-$(CONFIG_USB_XHCI_HCD) += xhci.o +obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c deleted file mode 100644 index 4cb69e0af834..000000000000 --- a/drivers/usb/host/xhci-hcd.c +++ /dev/null @@ -1,1916 +0,0 @@ -/* - * xHCI host controller driver - * - * Copyright (C) 2008 Intel Corp. - * - * Author: Sarah Sharp - * Some code borrowed from the Linux EHCI driver. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, - * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include -#include -#include - -#include "xhci.h" - -#define DRIVER_AUTHOR "Sarah Sharp" -#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" - -/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */ -static int link_quirk; -module_param(link_quirk, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB"); - -/* TODO: copied from ehci-hcd.c - can this be refactored? */ -/* - * handshake - spin reading hc until handshake completes or fails - * @ptr: address of hc register to be read - * @mask: bits to look at in result of read - * @done: value of those bits when handshake succeeds - * @usec: timeout in microseconds - * - * Returns negative errno, or zero on success - * - * Success happens when the "mask" bits have the specified value (hardware - * handshake done). There are two failure modes: "usec" have passed (major - * hardware flakeout), or the register reads as all-ones (hardware removed). 
- */ -static int handshake(struct xhci_hcd *xhci, void __iomem *ptr, - u32 mask, u32 done, int usec) -{ - u32 result; - - do { - result = xhci_readl(xhci, ptr); - if (result == ~(u32)0) /* card removed */ - return -ENODEV; - result &= mask; - if (result == done) - return 0; - udelay(1); - usec--; - } while (usec > 0); - return -ETIMEDOUT; -} - -/* - * Disable interrupts and begin the xHCI halting process. - */ -void xhci_quiesce(struct xhci_hcd *xhci) -{ - u32 halted; - u32 cmd; - u32 mask; - - mask = ~(XHCI_IRQS); - halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT; - if (!halted) - mask &= ~CMD_RUN; - - cmd = xhci_readl(xhci, &xhci->op_regs->command); - cmd &= mask; - xhci_writel(xhci, cmd, &xhci->op_regs->command); -} - -/* - * Force HC into halt state. - * - * Disable any IRQs and clear the run/stop bit. - * HC will complete any current and actively pipelined transactions, and - * should halt within 16 microframes of the run/stop bit being cleared. - * Read HC Halted bit in the status register to see when the HC is finished. - * XXX: shouldn't we set HC_STATE_HALT here somewhere? - */ -int xhci_halt(struct xhci_hcd *xhci) -{ - xhci_dbg(xhci, "// Halt the HC\n"); - xhci_quiesce(xhci); - - return handshake(xhci, &xhci->op_regs->status, - STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); -} - -/* - * Reset a halted HC, and set the internal HC state to HC_STATE_HALT. - * - * This resets pipelines, timers, counters, state machines, etc. - * Transactions will be terminated immediately, and operational registers - * will be set to their defaults. - */ -int xhci_reset(struct xhci_hcd *xhci) -{ - u32 command; - u32 state; - - state = xhci_readl(xhci, &xhci->op_regs->status); - if ((state & STS_HALT) == 0) { - xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); - return 0; - } - - xhci_dbg(xhci, "// Reset the HC\n"); - command = xhci_readl(xhci, &xhci->op_regs->command); - command |= CMD_RESET; - xhci_writel(xhci, command, &xhci->op_regs->command); - /* XXX: Why does EHCI set this here? Shouldn't other code do this? */ - xhci_to_hcd(xhci)->state = HC_STATE_HALT; - - return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000); -} - - -#if 0 -/* Set up MSI-X table for entry 0 (may claim other entries later) */ -static int xhci_setup_msix(struct xhci_hcd *xhci) -{ - int ret; - struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); - - xhci->msix_count = 0; - /* XXX: did I do this right? ixgbe does kcalloc for more than one */ - xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL); - if (!xhci->msix_entries) { - xhci_err(xhci, "Failed to allocate MSI-X entries\n"); - return -ENOMEM; - } - xhci->msix_entries[0].entry = 0; - - ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count); - if (ret) { - xhci_err(xhci, "Failed to enable MSI-X\n"); - goto free_entries; - } - - /* - * Pass the xhci pointer value as the request_irq "cookie". - * If more irqs are added, this will need to be unique for each one. - */ - ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0, - "xHCI", xhci_to_hcd(xhci)); - if (ret) { - xhci_err(xhci, "Failed to allocate MSI-X interrupt\n"); - goto disable_msix; - } - xhci_dbg(xhci, "Finished setting up MSI-X\n"); - return 0; - -disable_msix: - pci_disable_msix(pdev); -free_entries: - kfree(xhci->msix_entries); - xhci->msix_entries = NULL; - return ret; -} - -/* XXX: code duplication; can xhci_setup_msix call this? 
*/ -/* Free any IRQs and disable MSI-X */ -static void xhci_cleanup_msix(struct xhci_hcd *xhci) -{ - struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); - if (!xhci->msix_entries) - return; - - free_irq(xhci->msix_entries[0].vector, xhci); - pci_disable_msix(pdev); - kfree(xhci->msix_entries); - xhci->msix_entries = NULL; - xhci_dbg(xhci, "Finished cleaning up MSI-X\n"); -} -#endif - -/* - * Initialize memory for HCD and xHC (one-time init). - * - * Program the PAGESIZE register, initialize the device context array, create - * device contexts (?), set up a command ring segment (or two?), create event - * ring (one for now). - */ -int xhci_init(struct usb_hcd *hcd) -{ - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - int retval = 0; - - xhci_dbg(xhci, "xhci_init\n"); - spin_lock_init(&xhci->lock); - if (link_quirk) { - xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n"); - xhci->quirks |= XHCI_LINK_TRB_QUIRK; - } else { - xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n"); - } - retval = xhci_mem_init(xhci, GFP_KERNEL); - xhci_dbg(xhci, "Finished xhci_init\n"); - - return retval; -} - -/* - * Called in interrupt context when there might be work - * queued on the event ring - * - * xhci->lock must be held by caller. - */ -static void xhci_work(struct xhci_hcd *xhci) -{ - u32 temp; - u64 temp_64; - - /* - * Clear the op reg interrupt status first, - * so we can receive interrupts from other MSI-X interrupters. - * Write 1 to clear the interrupt status. - */ - temp = xhci_readl(xhci, &xhci->op_regs->status); - temp |= STS_EINT; - xhci_writel(xhci, temp, &xhci->op_regs->status); - /* FIXME when MSI-X is supported and there are multiple vectors */ - /* Clear the MSI-X event interrupt status */ - - /* Acknowledge the interrupt */ - temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); - temp |= 0x3; - xhci_writel(xhci, temp, &xhci->ir_set->irq_pending); - /* Flush posted writes */ - xhci_readl(xhci, &xhci->ir_set->irq_pending); - - if (xhci->xhc_state & XHCI_STATE_DYING) - xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " - "Shouldn't IRQs be disabled?\n"); - else - /* FIXME this should be a delayed service routine - * that clears the EHB. - */ - xhci_handle_event(xhci); - - /* Clear the event handler busy flag (RW1C); the event ring should be empty. */ - temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); - xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue); - /* Flush posted writes -- FIXME is this necessary? */ - xhci_readl(xhci, &xhci->ir_set->irq_pending); -} - -/*-------------------------------------------------------------------------*/ - -/* - * xHCI spec says we can get an interrupt, and if the HC has an error condition, - * we might get bad data out of the event ring. Section 4.10.2.7 has a list of - * indicators of an event TRB error, but we check the status *first* to be safe. 
- */ -irqreturn_t xhci_irq(struct usb_hcd *hcd) -{ - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - u32 temp, temp2; - union xhci_trb *trb; - - spin_lock(&xhci->lock); - trb = xhci->event_ring->dequeue; - /* Check if the xHC generated the interrupt, or the irq is shared */ - temp = xhci_readl(xhci, &xhci->op_regs->status); - temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending); - if (temp == 0xffffffff && temp2 == 0xffffffff) - goto hw_died; - - if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) { - spin_unlock(&xhci->lock); - return IRQ_NONE; - } - xhci_dbg(xhci, "op reg status = %08x\n", temp); - xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2); - xhci_dbg(xhci, "Event ring dequeue ptr:\n"); - xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n", - (unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb), - lower_32_bits(trb->link.segment_ptr), - upper_32_bits(trb->link.segment_ptr), - (unsigned int) trb->link.intr_target, - (unsigned int) trb->link.control); - - if (temp & STS_FATAL) { - xhci_warn(xhci, "WARNING: Host System Error\n"); - xhci_halt(xhci); -hw_died: - xhci_to_hcd(xhci)->state = HC_STATE_HALT; - spin_unlock(&xhci->lock); - return -ESHUTDOWN; - } - - xhci_work(xhci); - spin_unlock(&xhci->lock); - - return IRQ_HANDLED; -} - -#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING -void xhci_event_ring_work(unsigned long arg) -{ - unsigned long flags; - int temp; - u64 temp_64; - struct xhci_hcd *xhci = (struct xhci_hcd *) arg; - int i, j; - - xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies); - - spin_lock_irqsave(&xhci->lock, flags); - temp = xhci_readl(xhci, &xhci->op_regs->status); - xhci_dbg(xhci, "op reg status = 0x%x\n", temp); - if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { - xhci_dbg(xhci, "HW died, polling stopped.\n"); - spin_unlock_irqrestore(&xhci->lock, flags); - return; - } - - temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); - xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp); - xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled); - xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask); - xhci->error_bitmask = 0; - xhci_dbg(xhci, "Event ring:\n"); - xhci_debug_segment(xhci, xhci->event_ring->deq_seg); - xhci_dbg_ring_ptrs(xhci, xhci->event_ring); - temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); - temp_64 &= ~ERST_PTR_MASK; - xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64); - xhci_dbg(xhci, "Command ring:\n"); - xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg); - xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); - xhci_dbg_cmd_ptrs(xhci); - for (i = 0; i < MAX_HC_SLOTS; ++i) { - if (!xhci->devs[i]) - continue; - for (j = 0; j < 31; ++j) { - struct xhci_ring *ring = xhci->devs[i]->eps[j].ring; - if (!ring) - continue; - xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j); - xhci_debug_segment(xhci, ring->deq_seg); - } - } - - if (xhci->noops_submitted != NUM_TEST_NOOPS) - if (xhci_setup_one_noop(xhci)) - xhci_ring_cmd_db(xhci); - spin_unlock_irqrestore(&xhci->lock, flags); - - if (!xhci->zombie) - mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ); - else - xhci_dbg(xhci, "Quit polling the event ring.\n"); -} -#endif - -/* - * Start the HC after it was halted. - * - * This function is called by the USB core when the HC driver is added. - * Its opposite is xhci_stop(). - * - * xhci_init() must be called once before this function can be called. - * Reset the HC, enable device slot contexts, program DCBAAP, and - * set command ring pointer and event ring pointer. 
- * - * Setup MSI-X vectors and enable interrupts. - */ -int xhci_run(struct usb_hcd *hcd) -{ - u32 temp; - u64 temp_64; - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - void (*doorbell)(struct xhci_hcd *) = NULL; - - hcd->uses_new_polling = 1; - hcd->poll_rh = 0; - - xhci_dbg(xhci, "xhci_run\n"); -#if 0 /* FIXME: MSI not setup yet */ - /* Do this at the very last minute */ - ret = xhci_setup_msix(xhci); - if (!ret) - return ret; - - return -ENOSYS; -#endif -#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING - init_timer(&xhci->event_ring_timer); - xhci->event_ring_timer.data = (unsigned long) xhci; - xhci->event_ring_timer.function = xhci_event_ring_work; - /* Poll the event ring */ - xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ; - xhci->zombie = 0; - xhci_dbg(xhci, "Setting event ring polling timer\n"); - add_timer(&xhci->event_ring_timer); -#endif - - xhci_dbg(xhci, "Command ring memory map follows:\n"); - xhci_debug_ring(xhci, xhci->cmd_ring); - xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); - xhci_dbg_cmd_ptrs(xhci); - - xhci_dbg(xhci, "ERST memory map follows:\n"); - xhci_dbg_erst(xhci, &xhci->erst); - xhci_dbg(xhci, "Event ring:\n"); - xhci_debug_ring(xhci, xhci->event_ring); - xhci_dbg_ring_ptrs(xhci, xhci->event_ring); - temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); - temp_64 &= ~ERST_PTR_MASK; - xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64); - - xhci_dbg(xhci, "// Set the interrupt modulation register\n"); - temp = xhci_readl(xhci, &xhci->ir_set->irq_control); - temp &= ~ER_IRQ_INTERVAL_MASK; - temp |= (u32) 160; - xhci_writel(xhci, temp, &xhci->ir_set->irq_control); - - /* Set the HCD state before we enable the irqs */ - hcd->state = HC_STATE_RUNNING; - temp = xhci_readl(xhci, &xhci->op_regs->command); - temp |= (CMD_EIE); - xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n", - temp); - xhci_writel(xhci, temp, &xhci->op_regs->command); - - temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); - xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n", - xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); - xhci_writel(xhci, ER_IRQ_ENABLE(temp), - &xhci->ir_set->irq_pending); - xhci_print_ir_set(xhci, xhci->ir_set, 0); - - if (NUM_TEST_NOOPS > 0) - doorbell = xhci_setup_one_noop(xhci); - - temp = xhci_readl(xhci, &xhci->op_regs->command); - temp |= (CMD_RUN); - xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", - temp); - xhci_writel(xhci, temp, &xhci->op_regs->command); - /* Flush PCI posted writes */ - temp = xhci_readl(xhci, &xhci->op_regs->command); - xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp); - if (doorbell) - (*doorbell)(xhci); - - xhci_dbg(xhci, "Finished xhci_run\n"); - return 0; -} - -/* - * Stop xHCI driver. - * - * This function is called by the USB core when the HC driver is removed. - * Its opposite is xhci_run(). - * - * Disable device contexts, disable IRQs, and quiesce the HC. - * Reset the HC, finish any completed transactions, and cleanup memory. 
- */ -void xhci_stop(struct usb_hcd *hcd) -{ - u32 temp; - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - - spin_lock_irq(&xhci->lock); - xhci_halt(xhci); - xhci_reset(xhci); - spin_unlock_irq(&xhci->lock); - -#if 0 /* No MSI yet */ - xhci_cleanup_msix(xhci); -#endif -#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING - /* Tell the event ring poll function not to reschedule */ - xhci->zombie = 1; - del_timer_sync(&xhci->event_ring_timer); -#endif - - xhci_dbg(xhci, "// Disabling event ring interrupts\n"); - temp = xhci_readl(xhci, &xhci->op_regs->status); - xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status); - temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); - xhci_writel(xhci, ER_IRQ_DISABLE(temp), - &xhci->ir_set->irq_pending); - xhci_print_ir_set(xhci, xhci->ir_set, 0); - - xhci_dbg(xhci, "cleaning up memory\n"); - xhci_mem_cleanup(xhci); - xhci_dbg(xhci, "xhci_stop completed - status = %x\n", - xhci_readl(xhci, &xhci->op_regs->status)); -} - -/* - * Shutdown HC (not bus-specific) - * - * This is called when the machine is rebooting or halting. We assume that the - * machine will be powered off, and the HC's internal state will be reset. - * Don't bother to free memory. - */ -void xhci_shutdown(struct usb_hcd *hcd) -{ - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - - spin_lock_irq(&xhci->lock); - xhci_halt(xhci); - spin_unlock_irq(&xhci->lock); - -#if 0 - xhci_cleanup_msix(xhci); -#endif - - xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n", - xhci_readl(xhci, &xhci->op_regs->status)); -} - -/*-------------------------------------------------------------------------*/ - -/** - * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and - * HCDs. Find the index for an endpoint given its descriptor. Use the return - * value to right shift 1 for the bitmask. - * - * Index = (epnum * 2) + direction - 1, - * where direction = 0 for OUT, 1 for IN. - * For control endpoints, the IN index is used (OUT index is unused), so - * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2) - */ -unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc) -{ - unsigned int index; - if (usb_endpoint_xfer_control(desc)) - index = (unsigned int) (usb_endpoint_num(desc)*2); - else - index = (unsigned int) (usb_endpoint_num(desc)*2) + - (usb_endpoint_dir_in(desc) ? 1 : 0) - 1; - return index; -} - -/* Find the flag for this endpoint (for use in the control context). Use the - * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is - * bit 1, etc. - */ -unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc) -{ - return 1 << (xhci_get_endpoint_index(desc) + 1); -} - -/* Find the flag for this endpoint (for use in the control context). Use the - * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is - * bit 1, etc. - */ -unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index) -{ - return 1 << (ep_index + 1); -} - -/* Compute the last valid endpoint context index. Basically, this is the - * endpoint index plus one. For slot contexts with more than valid endpoint, - * we find the most significant bit set in the added contexts flags. - * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000 - * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one. - */ -unsigned int xhci_last_valid_endpoint(u32 added_ctxs) -{ - return fls(added_ctxs) - 1; -} - -/* Returns 1 if the arguments are OK; - * returns 0 this is a root hub; returns -EINVAL for NULL pointers. 
- */ -int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, - struct usb_host_endpoint *ep, int check_ep, const char *func) { - if (!hcd || (check_ep && !ep) || !udev) { - printk(KERN_DEBUG "xHCI %s called with invalid args\n", - func); - return -EINVAL; - } - if (!udev->parent) { - printk(KERN_DEBUG "xHCI %s called for root hub\n", - func); - return 0; - } - if (!udev->slot_id) { - printk(KERN_DEBUG "xHCI %s called with unaddressed device\n", - func); - return -EINVAL; - } - return 1; -} - -static int xhci_configure_endpoint(struct xhci_hcd *xhci, - struct usb_device *udev, struct xhci_command *command, - bool ctx_change, bool must_succeed); - -/* - * Full speed devices may have a max packet size greater than 8 bytes, but the - * USB core doesn't know that until it reads the first 8 bytes of the - * descriptor. If the usb_device's max packet size changes after that point, - * we need to issue an evaluate context command and wait on it. - */ -static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, - unsigned int ep_index, struct urb *urb) -{ - struct xhci_container_ctx *in_ctx; - struct xhci_container_ctx *out_ctx; - struct xhci_input_control_ctx *ctrl_ctx; - struct xhci_ep_ctx *ep_ctx; - int max_packet_size; - int hw_max_packet_size; - int ret = 0; - - out_ctx = xhci->devs[slot_id]->out_ctx; - ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); - hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2); - max_packet_size = urb->dev->ep0.desc.wMaxPacketSize; - if (hw_max_packet_size != max_packet_size) { - xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n"); - xhci_dbg(xhci, "Max packet size in usb_device = %d\n", - max_packet_size); - xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n", - hw_max_packet_size); - xhci_dbg(xhci, "Issuing evaluate context command.\n"); - - /* Set up the modified control endpoint 0 */ - xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, - xhci->devs[slot_id]->out_ctx, ep_index); - in_ctx = xhci->devs[slot_id]->in_ctx; - ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); - ep_ctx->ep_info2 &= ~MAX_PACKET_MASK; - ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size); - - /* Set up the input context flags for the command */ - /* FIXME: This won't work if a non-default control endpoint - * changes max packet sizes. - */ - ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); - ctrl_ctx->add_flags = EP0_FLAG; - ctrl_ctx->drop_flags = 0; - - xhci_dbg(xhci, "Slot %d input context\n", slot_id); - xhci_dbg_ctx(xhci, in_ctx, ep_index); - xhci_dbg(xhci, "Slot %d output context\n", slot_id); - xhci_dbg_ctx(xhci, out_ctx, ep_index); - - ret = xhci_configure_endpoint(xhci, urb->dev, NULL, - true, false); - - /* Clean up the input context for later use by bandwidth - * functions. 
- */ - ctrl_ctx->add_flags = SLOT_FLAG; - } - return ret; -} - -/* - * non-error returns are a promise to giveback() the urb later - * we drop ownership so next owner (or urb unlink) can get it - */ -int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) -{ - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - unsigned long flags; - int ret = 0; - unsigned int slot_id, ep_index; - - - if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0) - return -EINVAL; - - slot_id = urb->dev->slot_id; - ep_index = xhci_get_endpoint_index(&urb->ep->desc); - - if (!xhci->devs || !xhci->devs[slot_id]) { - if (!in_interrupt()) - dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n"); - ret = -EINVAL; - goto exit; - } - if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) { - if (!in_interrupt()) - xhci_dbg(xhci, "urb submitted during PCI suspend\n"); - ret = -ESHUTDOWN; - goto exit; - } - if (usb_endpoint_xfer_control(&urb->ep->desc)) { - /* Check to see if the max packet size for the default control - * endpoint changed during FS device enumeration - */ - if (urb->dev->speed == USB_SPEED_FULL) { - ret = xhci_check_maxpacket(xhci, slot_id, - ep_index, urb); - if (ret < 0) - return ret; - } - - /* We have a spinlock and interrupts disabled, so we must pass - * atomic context to this function, which may allocate memory. - */ - spin_lock_irqsave(&xhci->lock, flags); - if (xhci->xhc_state & XHCI_STATE_DYING) - goto dying; - ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, - slot_id, ep_index); - spin_unlock_irqrestore(&xhci->lock, flags); - } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { - spin_lock_irqsave(&xhci->lock, flags); - if (xhci->xhc_state & XHCI_STATE_DYING) - goto dying; - ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, - slot_id, ep_index); - spin_unlock_irqrestore(&xhci->lock, flags); - } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { - spin_lock_irqsave(&xhci->lock, flags); - if (xhci->xhc_state & XHCI_STATE_DYING) - goto dying; - ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, - slot_id, ep_index); - spin_unlock_irqrestore(&xhci->lock, flags); - } else { - ret = -EINVAL; - } -exit: - return ret; -dying: - xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " - "non-responsive xHCI host.\n", - urb->ep->desc.bEndpointAddress, urb); - spin_unlock_irqrestore(&xhci->lock, flags); - return -ESHUTDOWN; -} - -/* - * Remove the URB's TD from the endpoint ring. This may cause the HC to stop - * USB transfers, potentially stopping in the middle of a TRB buffer. The HC - * should pick up where it left off in the TD, unless a Set Transfer Ring - * Dequeue Pointer is issued. - * - * The TRBs that make up the buffers for the canceled URB will be "removed" from - * the ring. Since the ring is a contiguous structure, they can't be physically - * removed. Instead, there are two options: - * - * 1) If the HC is in the middle of processing the URB to be canceled, we - * simply move the ring's dequeue pointer past those TRBs using the Set - * Transfer Ring Dequeue Pointer command. This will be the common case, - * when drivers timeout on the last submitted URB and attempt to cancel. - * - * 2) If the HC is in the middle of a different TD, we turn the TRBs into a - * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The - * HC will need to invalidate the any TRBs it has cached after the stop - * endpoint command, as noted in the xHCI 0.95 errata. 
- * - * 3) The TD may have completed by the time the Stop Endpoint Command - * completes, so software needs to handle that case too. - * - * This function should protect against the TD enqueueing code ringing the - * doorbell while this code is waiting for a Stop Endpoint command to complete. - * It also needs to account for multiple cancellations on happening at the same - * time for the same endpoint. - * - * Note that this function can be called in any context, or so says - * usb_hcd_unlink_urb() - */ -int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) -{ - unsigned long flags; - int ret; - u32 temp; - struct xhci_hcd *xhci; - struct xhci_td *td; - unsigned int ep_index; - struct xhci_ring *ep_ring; - struct xhci_virt_ep *ep; - - xhci = hcd_to_xhci(hcd); - spin_lock_irqsave(&xhci->lock, flags); - /* Make sure the URB hasn't completed or been unlinked already */ - ret = usb_hcd_check_unlink_urb(hcd, urb, status); - if (ret || !urb->hcpriv) - goto done; - temp = xhci_readl(xhci, &xhci->op_regs->status); - if (temp == 0xffffffff) { - xhci_dbg(xhci, "HW died, freeing TD.\n"); - td = (struct xhci_td *) urb->hcpriv; - - usb_hcd_unlink_urb_from_ep(hcd, urb); - spin_unlock_irqrestore(&xhci->lock, flags); - usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN); - kfree(td); - return ret; - } - if (xhci->xhc_state & XHCI_STATE_DYING) { - xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on " - "non-responsive xHCI host.\n", - urb->ep->desc.bEndpointAddress, urb); - /* Let the stop endpoint command watchdog timer (which set this - * state) finish cleaning up the endpoint TD lists. We must - * have caught it in the middle of dropping a lock and giving - * back an URB. - */ - goto done; - } - - xhci_dbg(xhci, "Cancel URB %p\n", urb); - xhci_dbg(xhci, "Event ring:\n"); - xhci_debug_ring(xhci, xhci->event_ring); - ep_index = xhci_get_endpoint_index(&urb->ep->desc); - ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; - ep_ring = ep->ring; - xhci_dbg(xhci, "Endpoint ring:\n"); - xhci_debug_ring(xhci, ep_ring); - td = (struct xhci_td *) urb->hcpriv; - - list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); - /* Queue a stop endpoint command, but only if this is - * the first cancellation to be handled. - */ - if (!(ep->ep_state & EP_HALT_PENDING)) { - ep->ep_state |= EP_HALT_PENDING; - ep->stop_cmds_pending++; - ep->stop_cmd_timer.expires = jiffies + - XHCI_STOP_EP_CMD_TIMEOUT * HZ; - add_timer(&ep->stop_cmd_timer); - xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); - xhci_ring_cmd_db(xhci); - } -done: - spin_unlock_irqrestore(&xhci->lock, flags); - return ret; -} - -/* Drop an endpoint from a new bandwidth configuration for this device. - * Only one call to this function is allowed per endpoint before - * check_bandwidth() or reset_bandwidth() must be called. - * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will - * add the endpoint to the schedule with possibly new parameters denoted by a - * different endpoint descriptor in usb_host_endpoint. - * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is - * not allowed. - * - * The USB core will not allow URBs to be queued to an endpoint that is being - * disabled, so there's no need for mutual exclusion to protect - * the xhci->devs[slot_id] structure. 
- */ -int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, - struct usb_host_endpoint *ep) -{ - struct xhci_hcd *xhci; - struct xhci_container_ctx *in_ctx, *out_ctx; - struct xhci_input_control_ctx *ctrl_ctx; - struct xhci_slot_ctx *slot_ctx; - unsigned int last_ctx; - unsigned int ep_index; - struct xhci_ep_ctx *ep_ctx; - u32 drop_flag; - u32 new_add_flags, new_drop_flags, new_slot_info; - int ret; - - ret = xhci_check_args(hcd, udev, ep, 1, __func__); - if (ret <= 0) - return ret; - xhci = hcd_to_xhci(hcd); - xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); - - drop_flag = xhci_get_endpoint_flag(&ep->desc); - if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) { - xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", - __func__, drop_flag); - return 0; - } - - if (!xhci->devs || !xhci->devs[udev->slot_id]) { - xhci_warn(xhci, "xHCI %s called with unaddressed device\n", - __func__); - return -EINVAL; - } - - in_ctx = xhci->devs[udev->slot_id]->in_ctx; - out_ctx = xhci->devs[udev->slot_id]->out_ctx; - ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); - ep_index = xhci_get_endpoint_index(&ep->desc); - ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); - /* If the HC already knows the endpoint is disabled, - * or the HCD has noted it is disabled, ignore this request - */ - if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED || - ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) { - xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", - __func__, ep); - return 0; - } - - ctrl_ctx->drop_flags |= drop_flag; - new_drop_flags = ctrl_ctx->drop_flags; - - ctrl_ctx->add_flags &= ~drop_flag; - new_add_flags = ctrl_ctx->add_flags; - - last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags); - slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); - /* Update the last valid endpoint context, if we deleted the last one */ - if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) { - slot_ctx->dev_info &= ~LAST_CTX_MASK; - slot_ctx->dev_info |= LAST_CTX(last_ctx); - } - new_slot_info = slot_ctx->dev_info; - - xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); - - xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", - (unsigned int) ep->desc.bEndpointAddress, - udev->slot_id, - (unsigned int) new_drop_flags, - (unsigned int) new_add_flags, - (unsigned int) new_slot_info); - return 0; -} - -/* Add an endpoint to a new possible bandwidth configuration for this device. - * Only one call to this function is allowed per endpoint before - * check_bandwidth() or reset_bandwidth() must be called. - * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will - * add the endpoint to the schedule with possibly new parameters denoted by a - * different endpoint descriptor in usb_host_endpoint. - * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is - * not allowed. - * - * The USB core will not allow URBs to be queued to an endpoint until the - * configuration or alt setting is installed in the device, so there's no need - * for mutual exclusion to protect the xhci->devs[slot_id] structure. 
- */ -int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, - struct usb_host_endpoint *ep) -{ - struct xhci_hcd *xhci; - struct xhci_container_ctx *in_ctx, *out_ctx; - unsigned int ep_index; - struct xhci_ep_ctx *ep_ctx; - struct xhci_slot_ctx *slot_ctx; - struct xhci_input_control_ctx *ctrl_ctx; - u32 added_ctxs; - unsigned int last_ctx; - u32 new_add_flags, new_drop_flags, new_slot_info; - int ret = 0; - - ret = xhci_check_args(hcd, udev, ep, 1, __func__); - if (ret <= 0) { - /* So we won't queue a reset ep command for a root hub */ - ep->hcpriv = NULL; - return ret; - } - xhci = hcd_to_xhci(hcd); - - added_ctxs = xhci_get_endpoint_flag(&ep->desc); - last_ctx = xhci_last_valid_endpoint(added_ctxs); - if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { - /* FIXME when we have to issue an evaluate endpoint command to - * deal with ep0 max packet size changing once we get the - * descriptors - */ - xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", - __func__, added_ctxs); - return 0; - } - - if (!xhci->devs || !xhci->devs[udev->slot_id]) { - xhci_warn(xhci, "xHCI %s called with unaddressed device\n", - __func__); - return -EINVAL; - } - - in_ctx = xhci->devs[udev->slot_id]->in_ctx; - out_ctx = xhci->devs[udev->slot_id]->out_ctx; - ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); - ep_index = xhci_get_endpoint_index(&ep->desc); - ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); - /* If the HCD has already noted the endpoint is enabled, - * ignore this request. - */ - if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { - xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", - __func__, ep); - return 0; - } - - /* - * Configuration and alternate setting changes must be done in - * process context, not interrupt context (or so documenation - * for usb_set_interface() and usb_set_configuration() claim). - */ - if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id], - udev, ep, GFP_NOIO) < 0) { - dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", - __func__, ep->desc.bEndpointAddress); - return -ENOMEM; - } - - ctrl_ctx->add_flags |= added_ctxs; - new_add_flags = ctrl_ctx->add_flags; - - /* If xhci_endpoint_disable() was called for this endpoint, but the - * xHC hasn't been notified yet through the check_bandwidth() call, - * this re-adds a new state for the endpoint from the new endpoint - * descriptors. We must drop and re-add this endpoint, so we leave the - * drop flags alone. - */ - new_drop_flags = ctrl_ctx->drop_flags; - - slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); - /* Update the last valid endpoint context, if we just added one past */ - if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) { - slot_ctx->dev_info &= ~LAST_CTX_MASK; - slot_ctx->dev_info |= LAST_CTX(last_ctx); - } - new_slot_info = slot_ctx->dev_info; - - /* Store the usb_device pointer for later use */ - ep->hcpriv = udev; - - xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", - (unsigned int) ep->desc.bEndpointAddress, - udev->slot_id, - (unsigned int) new_drop_flags, - (unsigned int) new_add_flags, - (unsigned int) new_slot_info); - return 0; -} - -static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) -{ - struct xhci_input_control_ctx *ctrl_ctx; - struct xhci_ep_ctx *ep_ctx; - struct xhci_slot_ctx *slot_ctx; - int i; - - /* When a device's add flag and drop flag are zero, any subsequent - * configure endpoint command will leave that endpoint's state - * untouched. 
Make sure we don't leave any old state in the input - * endpoint contexts. - */ - ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); - ctrl_ctx->drop_flags = 0; - ctrl_ctx->add_flags = 0; - slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); - slot_ctx->dev_info &= ~LAST_CTX_MASK; - /* Endpoint 0 is always valid */ - slot_ctx->dev_info |= LAST_CTX(1); - for (i = 1; i < 31; ++i) { - ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); - ep_ctx->ep_info = 0; - ep_ctx->ep_info2 = 0; - ep_ctx->deq = 0; - ep_ctx->tx_info = 0; - } -} - -static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, - struct usb_device *udev, int *cmd_status) -{ - int ret; - - switch (*cmd_status) { - case COMP_ENOMEM: - dev_warn(&udev->dev, "Not enough host controller resources " - "for new device state.\n"); - ret = -ENOMEM; - /* FIXME: can we allocate more resources for the HC? */ - break; - case COMP_BW_ERR: - dev_warn(&udev->dev, "Not enough bandwidth " - "for new device state.\n"); - ret = -ENOSPC; - /* FIXME: can we go back to the old state? */ - break; - case COMP_TRB_ERR: - /* the HCD set up something wrong */ - dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " - "add flag = 1, " - "and endpoint is not disabled.\n"); - ret = -EINVAL; - break; - case COMP_SUCCESS: - dev_dbg(&udev->dev, "Successful Endpoint Configure command\n"); - ret = 0; - break; - default: - xhci_err(xhci, "ERROR: unexpected command completion " - "code 0x%x.\n", *cmd_status); - ret = -EINVAL; - break; - } - return ret; -} - -static int xhci_evaluate_context_result(struct xhci_hcd *xhci, - struct usb_device *udev, int *cmd_status) -{ - int ret; - struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; - - switch (*cmd_status) { - case COMP_EINVAL: - dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate " - "context command.\n"); - ret = -EINVAL; - break; - case COMP_EBADSLT: - dev_warn(&udev->dev, "WARN: slot not enabled for" - "evaluate context command.\n"); - case COMP_CTX_STATE: - dev_warn(&udev->dev, "WARN: invalid context state for " - "evaluate context command.\n"); - xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); - ret = -EINVAL; - break; - case COMP_SUCCESS: - dev_dbg(&udev->dev, "Successful evaluate context command\n"); - ret = 0; - break; - default: - xhci_err(xhci, "ERROR: unexpected command completion " - "code 0x%x.\n", *cmd_status); - ret = -EINVAL; - break; - } - return ret; -} - -/* Issue a configure endpoint command or evaluate context command - * and wait for it to finish. 
- */ -static int xhci_configure_endpoint(struct xhci_hcd *xhci, - struct usb_device *udev, - struct xhci_command *command, - bool ctx_change, bool must_succeed) -{ - int ret; - int timeleft; - unsigned long flags; - struct xhci_container_ctx *in_ctx; - struct completion *cmd_completion; - int *cmd_status; - struct xhci_virt_device *virt_dev; - - spin_lock_irqsave(&xhci->lock, flags); - virt_dev = xhci->devs[udev->slot_id]; - if (command) { - in_ctx = command->in_ctx; - cmd_completion = command->completion; - cmd_status = &command->status; - command->command_trb = xhci->cmd_ring->enqueue; - list_add_tail(&command->cmd_list, &virt_dev->cmd_list); - } else { - in_ctx = virt_dev->in_ctx; - cmd_completion = &virt_dev->cmd_completion; - cmd_status = &virt_dev->cmd_status; - } - - if (!ctx_change) - ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, - udev->slot_id, must_succeed); - else - ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, - udev->slot_id); - if (ret < 0) { - if (command) - list_del(&command->cmd_list); - spin_unlock_irqrestore(&xhci->lock, flags); - xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); - return -ENOMEM; - } - xhci_ring_cmd_db(xhci); - spin_unlock_irqrestore(&xhci->lock, flags); - - /* Wait for the configure endpoint command to complete */ - timeleft = wait_for_completion_interruptible_timeout( - cmd_completion, - USB_CTRL_SET_TIMEOUT); - if (timeleft <= 0) { - xhci_warn(xhci, "%s while waiting for %s command\n", - timeleft == 0 ? "Timeout" : "Signal", - ctx_change == 0 ? - "configure endpoint" : - "evaluate context"); - /* FIXME cancel the configure endpoint command */ - return -ETIME; - } - - if (!ctx_change) - return xhci_configure_endpoint_result(xhci, udev, cmd_status); - return xhci_evaluate_context_result(xhci, udev, cmd_status); -} - -/* Called after one or more calls to xhci_add_endpoint() or - * xhci_drop_endpoint(). If this call fails, the USB core is expected - * to call xhci_reset_bandwidth(). - * - * Since we are in the middle of changing either configuration or - * installing a new alt setting, the USB core won't allow URBs to be - * enqueued for any endpoint on the old config or interface. Nothing - * else should be touching the xhci->devs[slot_id] structure, so we - * don't need to take the xhci->lock for manipulating that. 
- */ -int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) -{ - int i; - int ret = 0; - struct xhci_hcd *xhci; - struct xhci_virt_device *virt_dev; - struct xhci_input_control_ctx *ctrl_ctx; - struct xhci_slot_ctx *slot_ctx; - - ret = xhci_check_args(hcd, udev, NULL, 0, __func__); - if (ret <= 0) - return ret; - xhci = hcd_to_xhci(hcd); - - if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) { - xhci_warn(xhci, "xHCI %s called with unaddressed device\n", - __func__); - return -EINVAL; - } - xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); - virt_dev = xhci->devs[udev->slot_id]; - - /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ - ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); - ctrl_ctx->add_flags |= SLOT_FLAG; - ctrl_ctx->add_flags &= ~EP0_FLAG; - ctrl_ctx->drop_flags &= ~SLOT_FLAG; - ctrl_ctx->drop_flags &= ~EP0_FLAG; - xhci_dbg(xhci, "New Input Control Context:\n"); - slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); - xhci_dbg_ctx(xhci, virt_dev->in_ctx, - LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); - - ret = xhci_configure_endpoint(xhci, udev, NULL, - false, false); - if (ret) { - /* Callee should call reset_bandwidth() */ - return ret; - } - - xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); - xhci_dbg_ctx(xhci, virt_dev->out_ctx, - LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); - - xhci_zero_in_ctx(xhci, virt_dev); - /* Install new rings and free or cache any old rings */ - for (i = 1; i < 31; ++i) { - if (!virt_dev->eps[i].new_ring) - continue; - /* Only cache or free the old ring if it exists. - * It may not if this is the first add of an endpoint. - */ - if (virt_dev->eps[i].ring) { - xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); - } - virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; - virt_dev->eps[i].new_ring = NULL; - } - - return ret; -} - -void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) -{ - struct xhci_hcd *xhci; - struct xhci_virt_device *virt_dev; - int i, ret; - - ret = xhci_check_args(hcd, udev, NULL, 0, __func__); - if (ret <= 0) - return; - xhci = hcd_to_xhci(hcd); - - if (!xhci->devs || !xhci->devs[udev->slot_id]) { - xhci_warn(xhci, "xHCI %s called with unaddressed device\n", - __func__); - return; - } - xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); - virt_dev = xhci->devs[udev->slot_id]; - /* Free any rings allocated for added endpoints */ - for (i = 0; i < 31; ++i) { - if (virt_dev->eps[i].new_ring) { - xhci_ring_free(xhci, virt_dev->eps[i].new_ring); - virt_dev->eps[i].new_ring = NULL; - } - } - xhci_zero_in_ctx(xhci, virt_dev); -} - -static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, - struct xhci_container_ctx *in_ctx, - struct xhci_container_ctx *out_ctx, - u32 add_flags, u32 drop_flags) -{ - struct xhci_input_control_ctx *ctrl_ctx; - ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); - ctrl_ctx->add_flags = add_flags; - ctrl_ctx->drop_flags = drop_flags; - xhci_slot_copy(xhci, in_ctx, out_ctx); - ctrl_ctx->add_flags |= SLOT_FLAG; - - xhci_dbg(xhci, "Input Context:\n"); - xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); -} - -void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, - unsigned int slot_id, unsigned int ep_index, - struct xhci_dequeue_state *deq_state) -{ - struct xhci_container_ctx *in_ctx; - struct xhci_ep_ctx *ep_ctx; - u32 added_ctxs; - dma_addr_t addr; - - xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, - xhci->devs[slot_id]->out_ctx, ep_index); - in_ctx = 
xhci->devs[slot_id]->in_ctx; - ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); - addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, - deq_state->new_deq_ptr); - if (addr == 0) { - xhci_warn(xhci, "WARN Cannot submit config ep after " - "reset ep command\n"); - xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", - deq_state->new_deq_seg, - deq_state->new_deq_ptr); - return; - } - ep_ctx->deq = addr | deq_state->new_cycle_state; - - added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); - xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, - xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs); -} - -void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, - struct usb_device *udev, unsigned int ep_index) -{ - struct xhci_dequeue_state deq_state; - struct xhci_virt_ep *ep; - - xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); - ep = &xhci->devs[udev->slot_id]->eps[ep_index]; - /* We need to move the HW's dequeue pointer past this TD, - * or it will attempt to resend it on the next doorbell ring. - */ - xhci_find_new_dequeue_state(xhci, udev->slot_id, - ep_index, ep->stopped_td, - &deq_state); - - /* HW with the reset endpoint quirk will use the saved dequeue state to - * issue a configure endpoint command later. - */ - if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { - xhci_dbg(xhci, "Queueing new dequeue state\n"); - xhci_queue_new_dequeue_state(xhci, udev->slot_id, - ep_index, &deq_state); - } else { - /* Better hope no one uses the input context between now and the - * reset endpoint completion! - */ - xhci_dbg(xhci, "Setting up input context for " - "configure endpoint command\n"); - xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, - ep_index, &deq_state); - } -} - -/* Deal with stalled endpoints. The core should have sent the control message - * to clear the halt condition. However, we need to make the xHCI hardware - * reset its sequence number, since a device will expect a sequence number of - * zero after the halt condition is cleared. - * Context: in_interrupt - */ -void xhci_endpoint_reset(struct usb_hcd *hcd, - struct usb_host_endpoint *ep) -{ - struct xhci_hcd *xhci; - struct usb_device *udev; - unsigned int ep_index; - unsigned long flags; - int ret; - struct xhci_virt_ep *virt_ep; - - xhci = hcd_to_xhci(hcd); - udev = (struct usb_device *) ep->hcpriv; - /* Called with a root hub endpoint (or an endpoint that wasn't added - * with xhci_add_endpoint() - */ - if (!ep->hcpriv) - return; - ep_index = xhci_get_endpoint_index(&ep->desc); - virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; - if (!virt_ep->stopped_td) { - xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", - ep->desc.bEndpointAddress); - return; - } - if (usb_endpoint_xfer_control(&ep->desc)) { - xhci_dbg(xhci, "Control endpoint stall already handled.\n"); - return; - } - - xhci_dbg(xhci, "Queueing reset endpoint command\n"); - spin_lock_irqsave(&xhci->lock, flags); - ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); - /* - * Can't change the ring dequeue pointer until it's transitioned to the - * stopped state, which is only upon a successful reset endpoint - * command. Better hope that last command worked! 
- */ - if (!ret) { - xhci_cleanup_stalled_ring(xhci, udev, ep_index); - kfree(virt_ep->stopped_td); - xhci_ring_cmd_db(xhci); - } - spin_unlock_irqrestore(&xhci->lock, flags); - - if (ret) - xhci_warn(xhci, "FIXME allocate a new ring segment\n"); -} - -/* - * This submits a Reset Device Command, which will set the device state to 0, - * set the device address to 0, and disable all the endpoints except the default - * control endpoint. The USB core should come back and call - * xhci_address_device(), and then re-set up the configuration. If this is - * called because of a usb_reset_and_verify_device(), then the old alternate - * settings will be re-installed through the normal bandwidth allocation - * functions. - * - * Wait for the Reset Device command to finish. Remove all structures - * associated with the endpoints that were disabled. Clear the input device - * structure? Cache the rings? Reset the control endpoint 0 max packet size? - */ -int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev) -{ - int ret, i; - unsigned long flags; - struct xhci_hcd *xhci; - unsigned int slot_id; - struct xhci_virt_device *virt_dev; - struct xhci_command *reset_device_cmd; - int timeleft; - int last_freed_endpoint; - - ret = xhci_check_args(hcd, udev, NULL, 0, __func__); - if (ret <= 0) - return ret; - xhci = hcd_to_xhci(hcd); - slot_id = udev->slot_id; - virt_dev = xhci->devs[slot_id]; - if (!virt_dev) { - xhci_dbg(xhci, "%s called with invalid slot ID %u\n", - __func__, slot_id); - return -EINVAL; - } - - xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); - /* Allocate the command structure that holds the struct completion. - * Assume we're in process context, since the normal device reset - * process has to wait for the device anyway. Storage devices are - * reset as part of error handling, so use GFP_NOIO instead of - * GFP_KERNEL. - */ - reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); - if (!reset_device_cmd) { - xhci_dbg(xhci, "Couldn't allocate command structure.\n"); - return -ENOMEM; - } - - /* Attempt to submit the Reset Device command to the command ring */ - spin_lock_irqsave(&xhci->lock, flags); - reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; - list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); - ret = xhci_queue_reset_device(xhci, slot_id); - if (ret) { - xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); - list_del(&reset_device_cmd->cmd_list); - spin_unlock_irqrestore(&xhci->lock, flags); - goto command_cleanup; - } - xhci_ring_cmd_db(xhci); - spin_unlock_irqrestore(&xhci->lock, flags); - - /* Wait for the Reset Device command to finish */ - timeleft = wait_for_completion_interruptible_timeout( - reset_device_cmd->completion, - USB_CTRL_SET_TIMEOUT); - if (timeleft <= 0) { - xhci_warn(xhci, "%s while waiting for reset device command\n", - timeleft == 0 ? "Timeout" : "Signal"); - spin_lock_irqsave(&xhci->lock, flags); - /* The timeout might have raced with the event ring handler, so - * only delete from the list if the item isn't poisoned. - */ - if (reset_device_cmd->cmd_list.next != LIST_POISON1) - list_del(&reset_device_cmd->cmd_list); - spin_unlock_irqrestore(&xhci->lock, flags); - ret = -ETIME; - goto command_cleanup; - } - - /* The Reset Device command can't fail, according to the 0.95/0.96 spec, - * unless we tried to reset a slot ID that wasn't enabled, - * or the device wasn't in the addressed or configured state. 
- */ - ret = reset_device_cmd->status; - switch (ret) { - case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ - case COMP_CTX_STATE: /* 0.96 completion code for same thing */ - xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", - slot_id, - xhci_get_slot_state(xhci, virt_dev->out_ctx)); - xhci_info(xhci, "Not freeing device rings.\n"); - /* Don't treat this as an error. May change my mind later. */ - ret = 0; - goto command_cleanup; - case COMP_SUCCESS: - xhci_dbg(xhci, "Successful reset device command.\n"); - break; - default: - if (xhci_is_vendor_info_code(xhci, ret)) - break; - xhci_warn(xhci, "Unknown completion code %u for " - "reset device command.\n", ret); - ret = -EINVAL; - goto command_cleanup; - } - - /* Everything but endpoint 0 is disabled, so free or cache the rings. */ - last_freed_endpoint = 1; - for (i = 1; i < 31; ++i) { - if (!virt_dev->eps[i].ring) - continue; - xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); - last_freed_endpoint = i; - } - xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); - xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); - ret = 0; - -command_cleanup: - xhci_free_command(xhci, reset_device_cmd); - return ret; -} - -/* - * At this point, the struct usb_device is about to go away, the device has - * disconnected, and all traffic has been stopped and the endpoints have been - * disabled. Free any HC data structures associated with that device. - */ -void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) -{ - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - struct xhci_virt_device *virt_dev; - unsigned long flags; - u32 state; - int i; - - if (udev->slot_id == 0) - return; - virt_dev = xhci->devs[udev->slot_id]; - if (!virt_dev) - return; - - /* Stop any wayward timer functions (which may grab the lock) */ - for (i = 0; i < 31; ++i) { - virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; - del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); - } - - spin_lock_irqsave(&xhci->lock, flags); - /* Don't disable the slot if the host controller is dead. */ - state = xhci_readl(xhci, &xhci->op_regs->status); - if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { - xhci_free_virt_device(xhci, udev->slot_id); - spin_unlock_irqrestore(&xhci->lock, flags); - return; - } - - if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { - spin_unlock_irqrestore(&xhci->lock, flags); - xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); - return; - } - xhci_ring_cmd_db(xhci); - spin_unlock_irqrestore(&xhci->lock, flags); - /* - * Event command completion handler will free any data structures - * associated with the slot. XXX Can free sleep? - */ -} - -/* - * Returns 0 if the xHC ran out of device slots, the Enable Slot command - * timed out, or allocating memory failed. Returns 1 on success. - */ -int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) -{ - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - unsigned long flags; - int timeleft; - int ret; - - spin_lock_irqsave(&xhci->lock, flags); - ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); - if (ret) { - spin_unlock_irqrestore(&xhci->lock, flags); - xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); - return 0; - } - xhci_ring_cmd_db(xhci); - spin_unlock_irqrestore(&xhci->lock, flags); - - /* XXX: how much time for xHC slot assignment? 
*/ - timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, - USB_CTRL_SET_TIMEOUT); - if (timeleft <= 0) { - xhci_warn(xhci, "%s while waiting for a slot\n", - timeleft == 0 ? "Timeout" : "Signal"); - /* FIXME cancel the enable slot request */ - return 0; - } - - if (!xhci->slot_id) { - xhci_err(xhci, "Error while assigning device slot ID\n"); - return 0; - } - /* xhci_alloc_virt_device() does not touch rings; no need to lock */ - if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) { - /* Disable slot, if we can do it without mem alloc */ - xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); - spin_lock_irqsave(&xhci->lock, flags); - if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) - xhci_ring_cmd_db(xhci); - spin_unlock_irqrestore(&xhci->lock, flags); - return 0; - } - udev->slot_id = xhci->slot_id; - /* Is this a LS or FS device under a HS hub? */ - /* Hub or peripherial? */ - return 1; -} - -/* - * Issue an Address Device command (which will issue a SetAddress request to - * the device). - * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so - * we should only issue and wait on one address command at the same time. - * - * We add one to the device address issued by the hardware because the USB core - * uses address 1 for the root hubs (even though they're not really devices). - */ -int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) -{ - unsigned long flags; - int timeleft; - struct xhci_virt_device *virt_dev; - int ret = 0; - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - struct xhci_slot_ctx *slot_ctx; - struct xhci_input_control_ctx *ctrl_ctx; - u64 temp_64; - - if (!udev->slot_id) { - xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); - return -EINVAL; - } - - virt_dev = xhci->devs[udev->slot_id]; - - /* If this is a Set Address to an unconfigured device, setup ep 0 */ - if (!udev->config) - xhci_setup_addressable_virt_dev(xhci, udev); - /* Otherwise, assume the core has the device configured how it wants */ - xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); - xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); - - spin_lock_irqsave(&xhci->lock, flags); - ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, - udev->slot_id); - if (ret) { - spin_unlock_irqrestore(&xhci->lock, flags); - xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); - return ret; - } - xhci_ring_cmd_db(xhci); - spin_unlock_irqrestore(&xhci->lock, flags); - - /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ - timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, - USB_CTRL_SET_TIMEOUT); - /* FIXME: From section 4.3.4: "Software shall be responsible for timing - * the SetAddress() "recovery interval" required by USB and aborting the - * command on a timeout. - */ - if (timeleft <= 0) { - xhci_warn(xhci, "%s while waiting for a slot\n", - timeleft == 0 ? 
"Timeout" : "Signal"); - /* FIXME cancel the address device command */ - return -ETIME; - } - - switch (virt_dev->cmd_status) { - case COMP_CTX_STATE: - case COMP_EBADSLT: - xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n", - udev->slot_id); - ret = -EINVAL; - break; - case COMP_TX_ERR: - dev_warn(&udev->dev, "Device not responding to set address.\n"); - ret = -EPROTO; - break; - case COMP_SUCCESS: - xhci_dbg(xhci, "Successful Address Device command\n"); - break; - default: - xhci_err(xhci, "ERROR: unexpected command completion " - "code 0x%x.\n", virt_dev->cmd_status); - xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); - xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); - ret = -EINVAL; - break; - } - if (ret) { - return ret; - } - temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); - xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); - xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", - udev->slot_id, - &xhci->dcbaa->dev_context_ptrs[udev->slot_id], - (unsigned long long) - xhci->dcbaa->dev_context_ptrs[udev->slot_id]); - xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", - (unsigned long long)virt_dev->out_ctx->dma); - xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); - xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); - xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); - xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); - /* - * USB core uses address 1 for the roothubs, so we add one to the - * address given back to us by the HC. - */ - slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); - udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1; - /* Zero the input context control for later use */ - ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); - ctrl_ctx->add_flags = 0; - ctrl_ctx->drop_flags = 0; - - xhci_dbg(xhci, "Device address = %d\n", udev->devnum); - /* XXX Meh, not sure if anyone else but choose_address uses this. */ - set_bit(udev->devnum, udev->bus->devmap.devicemap); - - return 0; -} - -/* Once a hub descriptor is fetched for a device, we need to update the xHC's - * internal data structures for the device. - */ -int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, - struct usb_tt *tt, gfp_t mem_flags) -{ - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - struct xhci_virt_device *vdev; - struct xhci_command *config_cmd; - struct xhci_input_control_ctx *ctrl_ctx; - struct xhci_slot_ctx *slot_ctx; - unsigned long flags; - unsigned think_time; - int ret; - - /* Ignore root hubs */ - if (!hdev->parent) - return 0; - - vdev = xhci->devs[hdev->slot_id]; - if (!vdev) { - xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); - return -EINVAL; - } - config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); - if (!config_cmd) { - xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); - return -ENOMEM; - } - - spin_lock_irqsave(&xhci->lock, flags); - xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); - ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); - ctrl_ctx->add_flags |= SLOT_FLAG; - slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); - slot_ctx->dev_info |= DEV_HUB; - if (tt->multi) - slot_ctx->dev_info |= DEV_MTT; - if (xhci->hci_version > 0x95) { - xhci_dbg(xhci, "xHCI version %x needs hub " - "TT think time and number of ports\n", - (unsigned int) xhci->hci_version); - slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild); - /* Set TT think time - convert from ns to FS bit times. 
- * 0 = 8 FS bit times, 1 = 16 FS bit times, - * 2 = 24 FS bit times, 3 = 32 FS bit times. - */ - think_time = tt->think_time; - if (think_time != 0) - think_time = (think_time / 666) - 1; - slot_ctx->tt_info |= TT_THINK_TIME(think_time); - } else { - xhci_dbg(xhci, "xHCI version %x doesn't need hub " - "TT think time or number of ports\n", - (unsigned int) xhci->hci_version); - } - slot_ctx->dev_state = 0; - spin_unlock_irqrestore(&xhci->lock, flags); - - xhci_dbg(xhci, "Set up %s for hub device.\n", - (xhci->hci_version > 0x95) ? - "configure endpoint" : "evaluate context"); - xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); - xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); - - /* Issue and wait for the configure endpoint or - * evaluate context command. - */ - if (xhci->hci_version > 0x95) - ret = xhci_configure_endpoint(xhci, hdev, config_cmd, - false, false); - else - ret = xhci_configure_endpoint(xhci, hdev, config_cmd, - true, false); - - xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); - xhci_dbg_ctx(xhci, vdev->out_ctx, 0); - - xhci_free_command(xhci, config_cmd); - return ret; -} - -int xhci_get_frame(struct usb_hcd *hcd) -{ - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - /* EHCI mods by the periodic size. Why? */ - return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3; -} - -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_AUTHOR(DRIVER_AUTHOR); -MODULE_LICENSE("GPL"); - -static int __init xhci_hcd_init(void) -{ -#ifdef CONFIG_PCI - int retval = 0; - - retval = xhci_register_pci(); - - if (retval < 0) { - printk(KERN_DEBUG "Problem registering PCI driver."); - return retval; - } -#endif - /* - * Check the compiler generated sizes of structures that must be laid - * out in specific ways for hardware access. - */ - BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); - BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); - BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); - /* xhci_device_control has eight fields, and also - * embeds one xhci_slot_ctx and 31 xhci_ep_ctx - */ - BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); - BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); - BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); - BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8); - BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); - /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ - BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); - BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); - return 0; -} -module_init(xhci_hcd_init); - -static void __exit xhci_hcd_cleanup(void) -{ -#ifdef CONFIG_PCI - xhci_unregister_pci(); -#endif -} -module_exit(xhci_hcd_cleanup); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c new file mode 100644 index 000000000000..4cb69e0af834 --- /dev/null +++ b/drivers/usb/host/xhci.c @@ -0,0 +1,1916 @@ +/* + * xHCI host controller driver + * + * Copyright (C) 2008 Intel Corp. + * + * Author: Sarah Sharp + * Some code borrowed from the Linux EHCI driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include + +#include "xhci.h" + +#define DRIVER_AUTHOR "Sarah Sharp" +#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" + +/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */ +static int link_quirk; +module_param(link_quirk, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB"); + +/* TODO: copied from ehci-hcd.c - can this be refactored? */ +/* + * handshake - spin reading hc until handshake completes or fails + * @ptr: address of hc register to be read + * @mask: bits to look at in result of read + * @done: value of those bits when handshake succeeds + * @usec: timeout in microseconds + * + * Returns negative errno, or zero on success + * + * Success happens when the "mask" bits have the specified value (hardware + * handshake done). There are two failure modes: "usec" have passed (major + * hardware flakeout), or the register reads as all-ones (hardware removed). + */ +static int handshake(struct xhci_hcd *xhci, void __iomem *ptr, + u32 mask, u32 done, int usec) +{ + u32 result; + + do { + result = xhci_readl(xhci, ptr); + if (result == ~(u32)0) /* card removed */ + return -ENODEV; + result &= mask; + if (result == done) + return 0; + udelay(1); + usec--; + } while (usec > 0); + return -ETIMEDOUT; +} + +/* + * Disable interrupts and begin the xHCI halting process. + */ +void xhci_quiesce(struct xhci_hcd *xhci) +{ + u32 halted; + u32 cmd; + u32 mask; + + mask = ~(XHCI_IRQS); + halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT; + if (!halted) + mask &= ~CMD_RUN; + + cmd = xhci_readl(xhci, &xhci->op_regs->command); + cmd &= mask; + xhci_writel(xhci, cmd, &xhci->op_regs->command); +} + +/* + * Force HC into halt state. + * + * Disable any IRQs and clear the run/stop bit. + * HC will complete any current and actively pipelined transactions, and + * should halt within 16 microframes of the run/stop bit being cleared. + * Read HC Halted bit in the status register to see when the HC is finished. + * XXX: shouldn't we set HC_STATE_HALT here somewhere? + */ +int xhci_halt(struct xhci_hcd *xhci) +{ + xhci_dbg(xhci, "// Halt the HC\n"); + xhci_quiesce(xhci); + + return handshake(xhci, &xhci->op_regs->status, + STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); +} + +/* + * Reset a halted HC, and set the internal HC state to HC_STATE_HALT. + * + * This resets pipelines, timers, counters, state machines, etc. + * Transactions will be terminated immediately, and operational registers + * will be set to their defaults. + */ +int xhci_reset(struct xhci_hcd *xhci) +{ + u32 command; + u32 state; + + state = xhci_readl(xhci, &xhci->op_regs->status); + if ((state & STS_HALT) == 0) { + xhci_warn(xhci, "Host controller not halted, aborting reset.\n"); + return 0; + } + + xhci_dbg(xhci, "// Reset the HC\n"); + command = xhci_readl(xhci, &xhci->op_regs->command); + command |= CMD_RESET; + xhci_writel(xhci, command, &xhci->op_regs->command); + /* XXX: Why does EHCI set this here? Shouldn't other code do this? 
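+	 * (For reference, the handshake() call below then polls the CMD_RESET
+	 * bit, giving the controller up to 250 * 1000 usec = 250 ms to clear it
+	 * before the reset is reported as -ETIMEDOUT.)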
*/ + xhci_to_hcd(xhci)->state = HC_STATE_HALT; + + return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000); +} + + +#if 0 +/* Set up MSI-X table for entry 0 (may claim other entries later) */ +static int xhci_setup_msix(struct xhci_hcd *xhci) +{ + int ret; + struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); + + xhci->msix_count = 0; + /* XXX: did I do this right? ixgbe does kcalloc for more than one */ + xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL); + if (!xhci->msix_entries) { + xhci_err(xhci, "Failed to allocate MSI-X entries\n"); + return -ENOMEM; + } + xhci->msix_entries[0].entry = 0; + + ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count); + if (ret) { + xhci_err(xhci, "Failed to enable MSI-X\n"); + goto free_entries; + } + + /* + * Pass the xhci pointer value as the request_irq "cookie". + * If more irqs are added, this will need to be unique for each one. + */ + ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0, + "xHCI", xhci_to_hcd(xhci)); + if (ret) { + xhci_err(xhci, "Failed to allocate MSI-X interrupt\n"); + goto disable_msix; + } + xhci_dbg(xhci, "Finished setting up MSI-X\n"); + return 0; + +disable_msix: + pci_disable_msix(pdev); +free_entries: + kfree(xhci->msix_entries); + xhci->msix_entries = NULL; + return ret; +} + +/* XXX: code duplication; can xhci_setup_msix call this? */ +/* Free any IRQs and disable MSI-X */ +static void xhci_cleanup_msix(struct xhci_hcd *xhci) +{ + struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); + if (!xhci->msix_entries) + return; + + free_irq(xhci->msix_entries[0].vector, xhci); + pci_disable_msix(pdev); + kfree(xhci->msix_entries); + xhci->msix_entries = NULL; + xhci_dbg(xhci, "Finished cleaning up MSI-X\n"); +} +#endif + +/* + * Initialize memory for HCD and xHC (one-time init). + * + * Program the PAGESIZE register, initialize the device context array, create + * device contexts (?), set up a command ring segment (or two?), create event + * ring (one for now). + */ +int xhci_init(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + int retval = 0; + + xhci_dbg(xhci, "xhci_init\n"); + spin_lock_init(&xhci->lock); + if (link_quirk) { + xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n"); + xhci->quirks |= XHCI_LINK_TRB_QUIRK; + } else { + xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n"); + } + retval = xhci_mem_init(xhci, GFP_KERNEL); + xhci_dbg(xhci, "Finished xhci_init\n"); + + return retval; +} + +/* + * Called in interrupt context when there might be work + * queued on the event ring + * + * xhci->lock must be held by caller. + */ +static void xhci_work(struct xhci_hcd *xhci) +{ + u32 temp; + u64 temp_64; + + /* + * Clear the op reg interrupt status first, + * so we can receive interrupts from other MSI-X interrupters. + * Write 1 to clear the interrupt status. + */ + temp = xhci_readl(xhci, &xhci->op_regs->status); + temp |= STS_EINT; + xhci_writel(xhci, temp, &xhci->op_regs->status); + /* FIXME when MSI-X is supported and there are multiple vectors */ + /* Clear the MSI-X event interrupt status */ + + /* Acknowledge the interrupt */ + temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); + temp |= 0x3; + xhci_writel(xhci, temp, &xhci->ir_set->irq_pending); + /* Flush posted writes */ + xhci_readl(xhci, &xhci->ir_set->irq_pending); + + if (xhci->xhc_state & XHCI_STATE_DYING) + xhci_dbg(xhci, "xHCI dying, ignoring interrupt. 
" + "Shouldn't IRQs be disabled?\n"); + else + /* FIXME this should be a delayed service routine + * that clears the EHB. + */ + xhci_handle_event(xhci); + + /* Clear the event handler busy flag (RW1C); the event ring should be empty. */ + temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue); + /* Flush posted writes -- FIXME is this necessary? */ + xhci_readl(xhci, &xhci->ir_set->irq_pending); +} + +/*-------------------------------------------------------------------------*/ + +/* + * xHCI spec says we can get an interrupt, and if the HC has an error condition, + * we might get bad data out of the event ring. Section 4.10.2.7 has a list of + * indicators of an event TRB error, but we check the status *first* to be safe. + */ +irqreturn_t xhci_irq(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + u32 temp, temp2; + union xhci_trb *trb; + + spin_lock(&xhci->lock); + trb = xhci->event_ring->dequeue; + /* Check if the xHC generated the interrupt, or the irq is shared */ + temp = xhci_readl(xhci, &xhci->op_regs->status); + temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending); + if (temp == 0xffffffff && temp2 == 0xffffffff) + goto hw_died; + + if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) { + spin_unlock(&xhci->lock); + return IRQ_NONE; + } + xhci_dbg(xhci, "op reg status = %08x\n", temp); + xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2); + xhci_dbg(xhci, "Event ring dequeue ptr:\n"); + xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n", + (unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb), + lower_32_bits(trb->link.segment_ptr), + upper_32_bits(trb->link.segment_ptr), + (unsigned int) trb->link.intr_target, + (unsigned int) trb->link.control); + + if (temp & STS_FATAL) { + xhci_warn(xhci, "WARNING: Host System Error\n"); + xhci_halt(xhci); +hw_died: + xhci_to_hcd(xhci)->state = HC_STATE_HALT; + spin_unlock(&xhci->lock); + return -ESHUTDOWN; + } + + xhci_work(xhci); + spin_unlock(&xhci->lock); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING +void xhci_event_ring_work(unsigned long arg) +{ + unsigned long flags; + int temp; + u64 temp_64; + struct xhci_hcd *xhci = (struct xhci_hcd *) arg; + int i, j; + + xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies); + + spin_lock_irqsave(&xhci->lock, flags); + temp = xhci_readl(xhci, &xhci->op_regs->status); + xhci_dbg(xhci, "op reg status = 0x%x\n", temp); + if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { + xhci_dbg(xhci, "HW died, polling stopped.\n"); + spin_unlock_irqrestore(&xhci->lock, flags); + return; + } + + temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); + xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp); + xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled); + xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask); + xhci->error_bitmask = 0; + xhci_dbg(xhci, "Event ring:\n"); + xhci_debug_segment(xhci, xhci->event_ring->deq_seg); + xhci_dbg_ring_ptrs(xhci, xhci->event_ring); + temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + temp_64 &= ~ERST_PTR_MASK; + xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64); + xhci_dbg(xhci, "Command ring:\n"); + xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg); + xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); + xhci_dbg_cmd_ptrs(xhci); + for (i = 0; i < MAX_HC_SLOTS; ++i) { + if (!xhci->devs[i]) + continue; + for (j = 0; j < 31; ++j) { + struct xhci_ring *ring = xhci->devs[i]->eps[j].ring; + if 
(!ring) + continue; + xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j); + xhci_debug_segment(xhci, ring->deq_seg); + } + } + + if (xhci->noops_submitted != NUM_TEST_NOOPS) + if (xhci_setup_one_noop(xhci)) + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + if (!xhci->zombie) + mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ); + else + xhci_dbg(xhci, "Quit polling the event ring.\n"); +} +#endif + +/* + * Start the HC after it was halted. + * + * This function is called by the USB core when the HC driver is added. + * Its opposite is xhci_stop(). + * + * xhci_init() must be called once before this function can be called. + * Reset the HC, enable device slot contexts, program DCBAAP, and + * set command ring pointer and event ring pointer. + * + * Setup MSI-X vectors and enable interrupts. + */ +int xhci_run(struct usb_hcd *hcd) +{ + u32 temp; + u64 temp_64; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + void (*doorbell)(struct xhci_hcd *) = NULL; + + hcd->uses_new_polling = 1; + hcd->poll_rh = 0; + + xhci_dbg(xhci, "xhci_run\n"); +#if 0 /* FIXME: MSI not setup yet */ + /* Do this at the very last minute */ + ret = xhci_setup_msix(xhci); + if (!ret) + return ret; + + return -ENOSYS; +#endif +#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING + init_timer(&xhci->event_ring_timer); + xhci->event_ring_timer.data = (unsigned long) xhci; + xhci->event_ring_timer.function = xhci_event_ring_work; + /* Poll the event ring */ + xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ; + xhci->zombie = 0; + xhci_dbg(xhci, "Setting event ring polling timer\n"); + add_timer(&xhci->event_ring_timer); +#endif + + xhci_dbg(xhci, "Command ring memory map follows:\n"); + xhci_debug_ring(xhci, xhci->cmd_ring); + xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); + xhci_dbg_cmd_ptrs(xhci); + + xhci_dbg(xhci, "ERST memory map follows:\n"); + xhci_dbg_erst(xhci, &xhci->erst); + xhci_dbg(xhci, "Event ring:\n"); + xhci_debug_ring(xhci, xhci->event_ring); + xhci_dbg_ring_ptrs(xhci, xhci->event_ring); + temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + temp_64 &= ~ERST_PTR_MASK; + xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64); + + xhci_dbg(xhci, "// Set the interrupt modulation register\n"); + temp = xhci_readl(xhci, &xhci->ir_set->irq_control); + temp &= ~ER_IRQ_INTERVAL_MASK; + temp |= (u32) 160; + xhci_writel(xhci, temp, &xhci->ir_set->irq_control); + + /* Set the HCD state before we enable the irqs */ + hcd->state = HC_STATE_RUNNING; + temp = xhci_readl(xhci, &xhci->op_regs->command); + temp |= (CMD_EIE); + xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n", + temp); + xhci_writel(xhci, temp, &xhci->op_regs->command); + + temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); + xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n", + xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); + xhci_writel(xhci, ER_IRQ_ENABLE(temp), + &xhci->ir_set->irq_pending); + xhci_print_ir_set(xhci, xhci->ir_set, 0); + + if (NUM_TEST_NOOPS > 0) + doorbell = xhci_setup_one_noop(xhci); + + temp = xhci_readl(xhci, &xhci->op_regs->command); + temp |= (CMD_RUN); + xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", + temp); + xhci_writel(xhci, temp, &xhci->op_regs->command); + /* Flush PCI posted writes */ + temp = xhci_readl(xhci, &xhci->op_regs->command); + xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp); + if (doorbell) + (*doorbell)(xhci); + + xhci_dbg(xhci, "Finished xhci_run\n"); + return 0; +} + +/* + * Stop xHCI driver. 
+ * + * This function is called by the USB core when the HC driver is removed. + * Its opposite is xhci_run(). + * + * Disable device contexts, disable IRQs, and quiesce the HC. + * Reset the HC, finish any completed transactions, and cleanup memory. + */ +void xhci_stop(struct usb_hcd *hcd) +{ + u32 temp; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + + spin_lock_irq(&xhci->lock); + xhci_halt(xhci); + xhci_reset(xhci); + spin_unlock_irq(&xhci->lock); + +#if 0 /* No MSI yet */ + xhci_cleanup_msix(xhci); +#endif +#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING + /* Tell the event ring poll function not to reschedule */ + xhci->zombie = 1; + del_timer_sync(&xhci->event_ring_timer); +#endif + + xhci_dbg(xhci, "// Disabling event ring interrupts\n"); + temp = xhci_readl(xhci, &xhci->op_regs->status); + xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status); + temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); + xhci_writel(xhci, ER_IRQ_DISABLE(temp), + &xhci->ir_set->irq_pending); + xhci_print_ir_set(xhci, xhci->ir_set, 0); + + xhci_dbg(xhci, "cleaning up memory\n"); + xhci_mem_cleanup(xhci); + xhci_dbg(xhci, "xhci_stop completed - status = %x\n", + xhci_readl(xhci, &xhci->op_regs->status)); +} + +/* + * Shutdown HC (not bus-specific) + * + * This is called when the machine is rebooting or halting. We assume that the + * machine will be powered off, and the HC's internal state will be reset. + * Don't bother to free memory. + */ +void xhci_shutdown(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + + spin_lock_irq(&xhci->lock); + xhci_halt(xhci); + spin_unlock_irq(&xhci->lock); + +#if 0 + xhci_cleanup_msix(xhci); +#endif + + xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n", + xhci_readl(xhci, &xhci->op_regs->status)); +} + +/*-------------------------------------------------------------------------*/ + +/** + * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and + * HCDs. Find the index for an endpoint given its descriptor. Use the return + * value to right shift 1 for the bitmask. + * + * Index = (epnum * 2) + direction - 1, + * where direction = 0 for OUT, 1 for IN. + * For control endpoints, the IN index is used (OUT index is unused), so + * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2) + */ +unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc) +{ + unsigned int index; + if (usb_endpoint_xfer_control(desc)) + index = (unsigned int) (usb_endpoint_num(desc)*2); + else + index = (unsigned int) (usb_endpoint_num(desc)*2) + + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1; + return index; +} + +/* Find the flag for this endpoint (for use in the control context). Use the + * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is + * bit 1, etc. + */ +unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc) +{ + return 1 << (xhci_get_endpoint_index(desc) + 1); +} + +/* Find the flag for this endpoint (for use in the control context). Use the + * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is + * bit 1, etc. + */ +unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index) +{ + return 1 << (ep_index + 1); +} + +/* Compute the last valid endpoint context index. Basically, this is the + * endpoint index plus one. For slot contexts with more than valid endpoint, + * we find the most significant bit set in the added contexts flags. + * e.g. 
ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000 + * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one. + */ +unsigned int xhci_last_valid_endpoint(u32 added_ctxs) +{ + return fls(added_ctxs) - 1; +} + +/* Returns 1 if the arguments are OK; + * returns 0 this is a root hub; returns -EINVAL for NULL pointers. + */ +int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep, int check_ep, const char *func) { + if (!hcd || (check_ep && !ep) || !udev) { + printk(KERN_DEBUG "xHCI %s called with invalid args\n", + func); + return -EINVAL; + } + if (!udev->parent) { + printk(KERN_DEBUG "xHCI %s called for root hub\n", + func); + return 0; + } + if (!udev->slot_id) { + printk(KERN_DEBUG "xHCI %s called with unaddressed device\n", + func); + return -EINVAL; + } + return 1; +} + +static int xhci_configure_endpoint(struct xhci_hcd *xhci, + struct usb_device *udev, struct xhci_command *command, + bool ctx_change, bool must_succeed); + +/* + * Full speed devices may have a max packet size greater than 8 bytes, but the + * USB core doesn't know that until it reads the first 8 bytes of the + * descriptor. If the usb_device's max packet size changes after that point, + * we need to issue an evaluate context command and wait on it. + */ +static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, + unsigned int ep_index, struct urb *urb) +{ + struct xhci_container_ctx *in_ctx; + struct xhci_container_ctx *out_ctx; + struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_ep_ctx *ep_ctx; + int max_packet_size; + int hw_max_packet_size; + int ret = 0; + + out_ctx = xhci->devs[slot_id]->out_ctx; + ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); + hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2); + max_packet_size = urb->dev->ep0.desc.wMaxPacketSize; + if (hw_max_packet_size != max_packet_size) { + xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n"); + xhci_dbg(xhci, "Max packet size in usb_device = %d\n", + max_packet_size); + xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n", + hw_max_packet_size); + xhci_dbg(xhci, "Issuing evaluate context command.\n"); + + /* Set up the modified control endpoint 0 */ + xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, + xhci->devs[slot_id]->out_ctx, ep_index); + in_ctx = xhci->devs[slot_id]->in_ctx; + ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); + ep_ctx->ep_info2 &= ~MAX_PACKET_MASK; + ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size); + + /* Set up the input context flags for the command */ + /* FIXME: This won't work if a non-default control endpoint + * changes max packet sizes. + */ + ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); + ctrl_ctx->add_flags = EP0_FLAG; + ctrl_ctx->drop_flags = 0; + + xhci_dbg(xhci, "Slot %d input context\n", slot_id); + xhci_dbg_ctx(xhci, in_ctx, ep_index); + xhci_dbg(xhci, "Slot %d output context\n", slot_id); + xhci_dbg_ctx(xhci, out_ctx, ep_index); + + ret = xhci_configure_endpoint(xhci, urb->dev, NULL, + true, false); + + /* Clean up the input context for later use by bandwidth + * functions. 
+ */ + ctrl_ctx->add_flags = SLOT_FLAG; + } + return ret; +} + +/* + * non-error returns are a promise to giveback() the urb later + * we drop ownership so next owner (or urb unlink) can get it + */ +int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + unsigned long flags; + int ret = 0; + unsigned int slot_id, ep_index; + + + if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0) + return -EINVAL; + + slot_id = urb->dev->slot_id; + ep_index = xhci_get_endpoint_index(&urb->ep->desc); + + if (!xhci->devs || !xhci->devs[slot_id]) { + if (!in_interrupt()) + dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n"); + ret = -EINVAL; + goto exit; + } + if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) { + if (!in_interrupt()) + xhci_dbg(xhci, "urb submitted during PCI suspend\n"); + ret = -ESHUTDOWN; + goto exit; + } + if (usb_endpoint_xfer_control(&urb->ep->desc)) { + /* Check to see if the max packet size for the default control + * endpoint changed during FS device enumeration + */ + if (urb->dev->speed == USB_SPEED_FULL) { + ret = xhci_check_maxpacket(xhci, slot_id, + ep_index, urb); + if (ret < 0) + return ret; + } + + /* We have a spinlock and interrupts disabled, so we must pass + * atomic context to this function, which may allocate memory. + */ + spin_lock_irqsave(&xhci->lock, flags); + if (xhci->xhc_state & XHCI_STATE_DYING) + goto dying; + ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, + slot_id, ep_index); + spin_unlock_irqrestore(&xhci->lock, flags); + } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { + spin_lock_irqsave(&xhci->lock, flags); + if (xhci->xhc_state & XHCI_STATE_DYING) + goto dying; + ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, + slot_id, ep_index); + spin_unlock_irqrestore(&xhci->lock, flags); + } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { + spin_lock_irqsave(&xhci->lock, flags); + if (xhci->xhc_state & XHCI_STATE_DYING) + goto dying; + ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, + slot_id, ep_index); + spin_unlock_irqrestore(&xhci->lock, flags); + } else { + ret = -EINVAL; + } +exit: + return ret; +dying: + xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for " + "non-responsive xHCI host.\n", + urb->ep->desc.bEndpointAddress, urb); + spin_unlock_irqrestore(&xhci->lock, flags); + return -ESHUTDOWN; +} + +/* + * Remove the URB's TD from the endpoint ring. This may cause the HC to stop + * USB transfers, potentially stopping in the middle of a TRB buffer. The HC + * should pick up where it left off in the TD, unless a Set Transfer Ring + * Dequeue Pointer is issued. + * + * The TRBs that make up the buffers for the canceled URB will be "removed" from + * the ring. Since the ring is a contiguous structure, they can't be physically + * removed. Instead, there are two options: + * + * 1) If the HC is in the middle of processing the URB to be canceled, we + * simply move the ring's dequeue pointer past those TRBs using the Set + * Transfer Ring Dequeue Pointer command. This will be the common case, + * when drivers timeout on the last submitted URB and attempt to cancel. + * + * 2) If the HC is in the middle of a different TD, we turn the TRBs into a + * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The + * HC will need to invalidate the any TRBs it has cached after the stop + * endpoint command, as noted in the xHCI 0.95 errata. 
+ * + * 3) The TD may have completed by the time the Stop Endpoint Command + * completes, so software needs to handle that case too. + * + * This function should protect against the TD enqueueing code ringing the + * doorbell while this code is waiting for a Stop Endpoint command to complete. + * It also needs to account for multiple cancellations on happening at the same + * time for the same endpoint. + * + * Note that this function can be called in any context, or so says + * usb_hcd_unlink_urb() + */ +int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) +{ + unsigned long flags; + int ret; + u32 temp; + struct xhci_hcd *xhci; + struct xhci_td *td; + unsigned int ep_index; + struct xhci_ring *ep_ring; + struct xhci_virt_ep *ep; + + xhci = hcd_to_xhci(hcd); + spin_lock_irqsave(&xhci->lock, flags); + /* Make sure the URB hasn't completed or been unlinked already */ + ret = usb_hcd_check_unlink_urb(hcd, urb, status); + if (ret || !urb->hcpriv) + goto done; + temp = xhci_readl(xhci, &xhci->op_regs->status); + if (temp == 0xffffffff) { + xhci_dbg(xhci, "HW died, freeing TD.\n"); + td = (struct xhci_td *) urb->hcpriv; + + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&xhci->lock, flags); + usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN); + kfree(td); + return ret; + } + if (xhci->xhc_state & XHCI_STATE_DYING) { + xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on " + "non-responsive xHCI host.\n", + urb->ep->desc.bEndpointAddress, urb); + /* Let the stop endpoint command watchdog timer (which set this + * state) finish cleaning up the endpoint TD lists. We must + * have caught it in the middle of dropping a lock and giving + * back an URB. + */ + goto done; + } + + xhci_dbg(xhci, "Cancel URB %p\n", urb); + xhci_dbg(xhci, "Event ring:\n"); + xhci_debug_ring(xhci, xhci->event_ring); + ep_index = xhci_get_endpoint_index(&urb->ep->desc); + ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; + ep_ring = ep->ring; + xhci_dbg(xhci, "Endpoint ring:\n"); + xhci_debug_ring(xhci, ep_ring); + td = (struct xhci_td *) urb->hcpriv; + + list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); + /* Queue a stop endpoint command, but only if this is + * the first cancellation to be handled. + */ + if (!(ep->ep_state & EP_HALT_PENDING)) { + ep->ep_state |= EP_HALT_PENDING; + ep->stop_cmds_pending++; + ep->stop_cmd_timer.expires = jiffies + + XHCI_STOP_EP_CMD_TIMEOUT * HZ; + add_timer(&ep->stop_cmd_timer); + xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); + xhci_ring_cmd_db(xhci); + } +done: + spin_unlock_irqrestore(&xhci->lock, flags); + return ret; +} + +/* Drop an endpoint from a new bandwidth configuration for this device. + * Only one call to this function is allowed per endpoint before + * check_bandwidth() or reset_bandwidth() must be called. + * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will + * add the endpoint to the schedule with possibly new parameters denoted by a + * different endpoint descriptor in usb_host_endpoint. + * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is + * not allowed. + * + * The USB core will not allow URBs to be queued to an endpoint that is being + * disabled, so there's no need for mutual exclusion to protect + * the xhci->devs[slot_id] structure. 
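+ *
+ * For example, dropping ep 1 IN (bEndpointAddress 0x81) gives
+ * xhci_get_endpoint_index() = (1 * 2) + 1 - 1 = 2, so drop_flag =
+ * xhci_get_endpoint_flag() = 1 << (2 + 1) = 0x8; that bit is set in the
+ * input control context's drop flags and cleared from its add flags below.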
+ */ +int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + struct xhci_hcd *xhci; + struct xhci_container_ctx *in_ctx, *out_ctx; + struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_slot_ctx *slot_ctx; + unsigned int last_ctx; + unsigned int ep_index; + struct xhci_ep_ctx *ep_ctx; + u32 drop_flag; + u32 new_add_flags, new_drop_flags, new_slot_info; + int ret; + + ret = xhci_check_args(hcd, udev, ep, 1, __func__); + if (ret <= 0) + return ret; + xhci = hcd_to_xhci(hcd); + xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); + + drop_flag = xhci_get_endpoint_flag(&ep->desc); + if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) { + xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", + __func__, drop_flag); + return 0; + } + + if (!xhci->devs || !xhci->devs[udev->slot_id]) { + xhci_warn(xhci, "xHCI %s called with unaddressed device\n", + __func__); + return -EINVAL; + } + + in_ctx = xhci->devs[udev->slot_id]->in_ctx; + out_ctx = xhci->devs[udev->slot_id]->out_ctx; + ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); + ep_index = xhci_get_endpoint_index(&ep->desc); + ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); + /* If the HC already knows the endpoint is disabled, + * or the HCD has noted it is disabled, ignore this request + */ + if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED || + ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) { + xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", + __func__, ep); + return 0; + } + + ctrl_ctx->drop_flags |= drop_flag; + new_drop_flags = ctrl_ctx->drop_flags; + + ctrl_ctx->add_flags &= ~drop_flag; + new_add_flags = ctrl_ctx->add_flags; + + last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags); + slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); + /* Update the last valid endpoint context, if we deleted the last one */ + if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) { + slot_ctx->dev_info &= ~LAST_CTX_MASK; + slot_ctx->dev_info |= LAST_CTX(last_ctx); + } + new_slot_info = slot_ctx->dev_info; + + xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); + + xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", + (unsigned int) ep->desc.bEndpointAddress, + udev->slot_id, + (unsigned int) new_drop_flags, + (unsigned int) new_add_flags, + (unsigned int) new_slot_info); + return 0; +} + +/* Add an endpoint to a new possible bandwidth configuration for this device. + * Only one call to this function is allowed per endpoint before + * check_bandwidth() or reset_bandwidth() must be called. + * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will + * add the endpoint to the schedule with possibly new parameters denoted by a + * different endpoint descriptor in usb_host_endpoint. + * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is + * not allowed. + * + * The USB core will not allow URBs to be queued to an endpoint until the + * configuration or alt setting is installed in the device, so there's no need + * for mutual exclusion to protect the xhci->devs[slot_id] structure. 
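+ *
+ * For example, adding ep 2 OUT (bEndpointAddress 0x02) gives endpoint index
+ * (2 * 2) + 0 - 1 = 3 and added_ctxs = 1 << (3 + 1) = 0x10;
+ * xhci_last_valid_endpoint(0x10) = fls(0x10) - 1 = 4, so the slot context's
+ * last valid context entry is raised to 4 if it was smaller.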
+ */ +int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + struct xhci_hcd *xhci; + struct xhci_container_ctx *in_ctx, *out_ctx; + unsigned int ep_index; + struct xhci_ep_ctx *ep_ctx; + struct xhci_slot_ctx *slot_ctx; + struct xhci_input_control_ctx *ctrl_ctx; + u32 added_ctxs; + unsigned int last_ctx; + u32 new_add_flags, new_drop_flags, new_slot_info; + int ret = 0; + + ret = xhci_check_args(hcd, udev, ep, 1, __func__); + if (ret <= 0) { + /* So we won't queue a reset ep command for a root hub */ + ep->hcpriv = NULL; + return ret; + } + xhci = hcd_to_xhci(hcd); + + added_ctxs = xhci_get_endpoint_flag(&ep->desc); + last_ctx = xhci_last_valid_endpoint(added_ctxs); + if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { + /* FIXME when we have to issue an evaluate endpoint command to + * deal with ep0 max packet size changing once we get the + * descriptors + */ + xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", + __func__, added_ctxs); + return 0; + } + + if (!xhci->devs || !xhci->devs[udev->slot_id]) { + xhci_warn(xhci, "xHCI %s called with unaddressed device\n", + __func__); + return -EINVAL; + } + + in_ctx = xhci->devs[udev->slot_id]->in_ctx; + out_ctx = xhci->devs[udev->slot_id]->out_ctx; + ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); + ep_index = xhci_get_endpoint_index(&ep->desc); + ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); + /* If the HCD has already noted the endpoint is enabled, + * ignore this request. + */ + if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { + xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", + __func__, ep); + return 0; + } + + /* + * Configuration and alternate setting changes must be done in + * process context, not interrupt context (or so documenation + * for usb_set_interface() and usb_set_configuration() claim). + */ + if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id], + udev, ep, GFP_NOIO) < 0) { + dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", + __func__, ep->desc.bEndpointAddress); + return -ENOMEM; + } + + ctrl_ctx->add_flags |= added_ctxs; + new_add_flags = ctrl_ctx->add_flags; + + /* If xhci_endpoint_disable() was called for this endpoint, but the + * xHC hasn't been notified yet through the check_bandwidth() call, + * this re-adds a new state for the endpoint from the new endpoint + * descriptors. We must drop and re-add this endpoint, so we leave the + * drop flags alone. + */ + new_drop_flags = ctrl_ctx->drop_flags; + + slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); + /* Update the last valid endpoint context, if we just added one past */ + if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) { + slot_ctx->dev_info &= ~LAST_CTX_MASK; + slot_ctx->dev_info |= LAST_CTX(last_ctx); + } + new_slot_info = slot_ctx->dev_info; + + /* Store the usb_device pointer for later use */ + ep->hcpriv = udev; + + xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", + (unsigned int) ep->desc.bEndpointAddress, + udev->slot_id, + (unsigned int) new_drop_flags, + (unsigned int) new_add_flags, + (unsigned int) new_slot_info); + return 0; +} + +static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) +{ + struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_ep_ctx *ep_ctx; + struct xhci_slot_ctx *slot_ctx; + int i; + + /* When a device's add flag and drop flag are zero, any subsequent + * configure endpoint command will leave that endpoint's state + * untouched. 
Make sure we don't leave any old state in the input + * endpoint contexts. + */ + ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); + ctrl_ctx->drop_flags = 0; + ctrl_ctx->add_flags = 0; + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); + slot_ctx->dev_info &= ~LAST_CTX_MASK; + /* Endpoint 0 is always valid */ + slot_ctx->dev_info |= LAST_CTX(1); + for (i = 1; i < 31; ++i) { + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); + ep_ctx->ep_info = 0; + ep_ctx->ep_info2 = 0; + ep_ctx->deq = 0; + ep_ctx->tx_info = 0; + } +} + +static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, + struct usb_device *udev, int *cmd_status) +{ + int ret; + + switch (*cmd_status) { + case COMP_ENOMEM: + dev_warn(&udev->dev, "Not enough host controller resources " + "for new device state.\n"); + ret = -ENOMEM; + /* FIXME: can we allocate more resources for the HC? */ + break; + case COMP_BW_ERR: + dev_warn(&udev->dev, "Not enough bandwidth " + "for new device state.\n"); + ret = -ENOSPC; + /* FIXME: can we go back to the old state? */ + break; + case COMP_TRB_ERR: + /* the HCD set up something wrong */ + dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " + "add flag = 1, " + "and endpoint is not disabled.\n"); + ret = -EINVAL; + break; + case COMP_SUCCESS: + dev_dbg(&udev->dev, "Successful Endpoint Configure command\n"); + ret = 0; + break; + default: + xhci_err(xhci, "ERROR: unexpected command completion " + "code 0x%x.\n", *cmd_status); + ret = -EINVAL; + break; + } + return ret; +} + +static int xhci_evaluate_context_result(struct xhci_hcd *xhci, + struct usb_device *udev, int *cmd_status) +{ + int ret; + struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; + + switch (*cmd_status) { + case COMP_EINVAL: + dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate " + "context command.\n"); + ret = -EINVAL; + break; + case COMP_EBADSLT: + dev_warn(&udev->dev, "WARN: slot not enabled for" + "evaluate context command.\n"); + case COMP_CTX_STATE: + dev_warn(&udev->dev, "WARN: invalid context state for " + "evaluate context command.\n"); + xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); + ret = -EINVAL; + break; + case COMP_SUCCESS: + dev_dbg(&udev->dev, "Successful evaluate context command\n"); + ret = 0; + break; + default: + xhci_err(xhci, "ERROR: unexpected command completion " + "code 0x%x.\n", *cmd_status); + ret = -EINVAL; + break; + } + return ret; +} + +/* Issue a configure endpoint command or evaluate context command + * and wait for it to finish. 
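+ *
+ * Roughly: the command is queued on the command ring under xhci->lock (using
+ * either the passed-in struct xhci_command or the device's default input
+ * context and completion), the doorbell is rung, and the caller sleeps on the
+ * completion for up to USB_CTRL_SET_TIMEOUT before the completion code is
+ * translated into an errno by the *_result() helpers above.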
+ */ +static int xhci_configure_endpoint(struct xhci_hcd *xhci, + struct usb_device *udev, + struct xhci_command *command, + bool ctx_change, bool must_succeed) +{ + int ret; + int timeleft; + unsigned long flags; + struct xhci_container_ctx *in_ctx; + struct completion *cmd_completion; + int *cmd_status; + struct xhci_virt_device *virt_dev; + + spin_lock_irqsave(&xhci->lock, flags); + virt_dev = xhci->devs[udev->slot_id]; + if (command) { + in_ctx = command->in_ctx; + cmd_completion = command->completion; + cmd_status = &command->status; + command->command_trb = xhci->cmd_ring->enqueue; + list_add_tail(&command->cmd_list, &virt_dev->cmd_list); + } else { + in_ctx = virt_dev->in_ctx; + cmd_completion = &virt_dev->cmd_completion; + cmd_status = &virt_dev->cmd_status; + } + + if (!ctx_change) + ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, + udev->slot_id, must_succeed); + else + ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, + udev->slot_id); + if (ret < 0) { + if (command) + list_del(&command->cmd_list); + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); + return -ENOMEM; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + /* Wait for the configure endpoint command to complete */ + timeleft = wait_for_completion_interruptible_timeout( + cmd_completion, + USB_CTRL_SET_TIMEOUT); + if (timeleft <= 0) { + xhci_warn(xhci, "%s while waiting for %s command\n", + timeleft == 0 ? "Timeout" : "Signal", + ctx_change == 0 ? + "configure endpoint" : + "evaluate context"); + /* FIXME cancel the configure endpoint command */ + return -ETIME; + } + + if (!ctx_change) + return xhci_configure_endpoint_result(xhci, udev, cmd_status); + return xhci_evaluate_context_result(xhci, udev, cmd_status); +} + +/* Called after one or more calls to xhci_add_endpoint() or + * xhci_drop_endpoint(). If this call fails, the USB core is expected + * to call xhci_reset_bandwidth(). + * + * Since we are in the middle of changing either configuration or + * installing a new alt setting, the USB core won't allow URBs to be + * enqueued for any endpoint on the old config or interface. Nothing + * else should be touching the xhci->devs[slot_id] structure, so we + * don't need to take the xhci->lock for manipulating that. 
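+ *
+ * For example, after xhci_add_endpoint() for ep 1 IN and xhci_drop_endpoint()
+ * for ep 2 OUT, the input control context arrives here with add_flags = 0x8
+ * and drop_flags = 0x10; forcing A0 = 1 and A1 = D0 = D1 = 0 below yields
+ * add_flags = 0x9 and drop_flags = 0x10 for the configure endpoint command.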
+ */ +int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +{ + int i; + int ret = 0; + struct xhci_hcd *xhci; + struct xhci_virt_device *virt_dev; + struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_slot_ctx *slot_ctx; + + ret = xhci_check_args(hcd, udev, NULL, 0, __func__); + if (ret <= 0) + return ret; + xhci = hcd_to_xhci(hcd); + + if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) { + xhci_warn(xhci, "xHCI %s called with unaddressed device\n", + __func__); + return -EINVAL; + } + xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); + virt_dev = xhci->devs[udev->slot_id]; + + /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ + ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); + ctrl_ctx->add_flags |= SLOT_FLAG; + ctrl_ctx->add_flags &= ~EP0_FLAG; + ctrl_ctx->drop_flags &= ~SLOT_FLAG; + ctrl_ctx->drop_flags &= ~EP0_FLAG; + xhci_dbg(xhci, "New Input Control Context:\n"); + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); + xhci_dbg_ctx(xhci, virt_dev->in_ctx, + LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); + + ret = xhci_configure_endpoint(xhci, udev, NULL, + false, false); + if (ret) { + /* Callee should call reset_bandwidth() */ + return ret; + } + + xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); + xhci_dbg_ctx(xhci, virt_dev->out_ctx, + LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); + + xhci_zero_in_ctx(xhci, virt_dev); + /* Install new rings and free or cache any old rings */ + for (i = 1; i < 31; ++i) { + if (!virt_dev->eps[i].new_ring) + continue; + /* Only cache or free the old ring if it exists. + * It may not if this is the first add of an endpoint. + */ + if (virt_dev->eps[i].ring) { + xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); + } + virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; + virt_dev->eps[i].new_ring = NULL; + } + + return ret; +} + +void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct xhci_hcd *xhci; + struct xhci_virt_device *virt_dev; + int i, ret; + + ret = xhci_check_args(hcd, udev, NULL, 0, __func__); + if (ret <= 0) + return; + xhci = hcd_to_xhci(hcd); + + if (!xhci->devs || !xhci->devs[udev->slot_id]) { + xhci_warn(xhci, "xHCI %s called with unaddressed device\n", + __func__); + return; + } + xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); + virt_dev = xhci->devs[udev->slot_id]; + /* Free any rings allocated for added endpoints */ + for (i = 0; i < 31; ++i) { + if (virt_dev->eps[i].new_ring) { + xhci_ring_free(xhci, virt_dev->eps[i].new_ring); + virt_dev->eps[i].new_ring = NULL; + } + } + xhci_zero_in_ctx(xhci, virt_dev); +} + +static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, + struct xhci_container_ctx *in_ctx, + struct xhci_container_ctx *out_ctx, + u32 add_flags, u32 drop_flags) +{ + struct xhci_input_control_ctx *ctrl_ctx; + ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); + ctrl_ctx->add_flags = add_flags; + ctrl_ctx->drop_flags = drop_flags; + xhci_slot_copy(xhci, in_ctx, out_ctx); + ctrl_ctx->add_flags |= SLOT_FLAG; + + xhci_dbg(xhci, "Input Context:\n"); + xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); +} + +void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, + unsigned int slot_id, unsigned int ep_index, + struct xhci_dequeue_state *deq_state) +{ + struct xhci_container_ctx *in_ctx; + struct xhci_ep_ctx *ep_ctx; + u32 added_ctxs; + dma_addr_t addr; + + xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, + xhci->devs[slot_id]->out_ctx, ep_index); + in_ctx = 
xhci->devs[slot_id]->in_ctx; + ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); + addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, + deq_state->new_deq_ptr); + if (addr == 0) { + xhci_warn(xhci, "WARN Cannot submit config ep after " + "reset ep command\n"); + xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", + deq_state->new_deq_seg, + deq_state->new_deq_ptr); + return; + } + ep_ctx->deq = addr | deq_state->new_cycle_state; + + added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); + xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, + xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs); +} + +void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, + struct usb_device *udev, unsigned int ep_index) +{ + struct xhci_dequeue_state deq_state; + struct xhci_virt_ep *ep; + + xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); + ep = &xhci->devs[udev->slot_id]->eps[ep_index]; + /* We need to move the HW's dequeue pointer past this TD, + * or it will attempt to resend it on the next doorbell ring. + */ + xhci_find_new_dequeue_state(xhci, udev->slot_id, + ep_index, ep->stopped_td, + &deq_state); + + /* HW with the reset endpoint quirk will use the saved dequeue state to + * issue a configure endpoint command later. + */ + if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { + xhci_dbg(xhci, "Queueing new dequeue state\n"); + xhci_queue_new_dequeue_state(xhci, udev->slot_id, + ep_index, &deq_state); + } else { + /* Better hope no one uses the input context between now and the + * reset endpoint completion! + */ + xhci_dbg(xhci, "Setting up input context for " + "configure endpoint command\n"); + xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, + ep_index, &deq_state); + } +} + +/* Deal with stalled endpoints. The core should have sent the control message + * to clear the halt condition. However, we need to make the xHCI hardware + * reset its sequence number, since a device will expect a sequence number of + * zero after the halt condition is cleared. + * Context: in_interrupt + */ +void xhci_endpoint_reset(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) +{ + struct xhci_hcd *xhci; + struct usb_device *udev; + unsigned int ep_index; + unsigned long flags; + int ret; + struct xhci_virt_ep *virt_ep; + + xhci = hcd_to_xhci(hcd); + udev = (struct usb_device *) ep->hcpriv; + /* Called with a root hub endpoint (or an endpoint that wasn't added + * with xhci_add_endpoint() + */ + if (!ep->hcpriv) + return; + ep_index = xhci_get_endpoint_index(&ep->desc); + virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; + if (!virt_ep->stopped_td) { + xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", + ep->desc.bEndpointAddress); + return; + } + if (usb_endpoint_xfer_control(&ep->desc)) { + xhci_dbg(xhci, "Control endpoint stall already handled.\n"); + return; + } + + xhci_dbg(xhci, "Queueing reset endpoint command\n"); + spin_lock_irqsave(&xhci->lock, flags); + ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); + /* + * Can't change the ring dequeue pointer until it's transitioned to the + * stopped state, which is only upon a successful reset endpoint + * command. Better hope that last command worked! 
+ */ + if (!ret) { + xhci_cleanup_stalled_ring(xhci, udev, ep_index); + kfree(virt_ep->stopped_td); + xhci_ring_cmd_db(xhci); + } + spin_unlock_irqrestore(&xhci->lock, flags); + + if (ret) + xhci_warn(xhci, "FIXME allocate a new ring segment\n"); +} + +/* + * This submits a Reset Device Command, which will set the device state to 0, + * set the device address to 0, and disable all the endpoints except the default + * control endpoint. The USB core should come back and call + * xhci_address_device(), and then re-set up the configuration. If this is + * called because of a usb_reset_and_verify_device(), then the old alternate + * settings will be re-installed through the normal bandwidth allocation + * functions. + * + * Wait for the Reset Device command to finish. Remove all structures + * associated with the endpoints that were disabled. Clear the input device + * structure? Cache the rings? Reset the control endpoint 0 max packet size? + */ +int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev) +{ + int ret, i; + unsigned long flags; + struct xhci_hcd *xhci; + unsigned int slot_id; + struct xhci_virt_device *virt_dev; + struct xhci_command *reset_device_cmd; + int timeleft; + int last_freed_endpoint; + + ret = xhci_check_args(hcd, udev, NULL, 0, __func__); + if (ret <= 0) + return ret; + xhci = hcd_to_xhci(hcd); + slot_id = udev->slot_id; + virt_dev = xhci->devs[slot_id]; + if (!virt_dev) { + xhci_dbg(xhci, "%s called with invalid slot ID %u\n", + __func__, slot_id); + return -EINVAL; + } + + xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); + /* Allocate the command structure that holds the struct completion. + * Assume we're in process context, since the normal device reset + * process has to wait for the device anyway. Storage devices are + * reset as part of error handling, so use GFP_NOIO instead of + * GFP_KERNEL. + */ + reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); + if (!reset_device_cmd) { + xhci_dbg(xhci, "Couldn't allocate command structure.\n"); + return -ENOMEM; + } + + /* Attempt to submit the Reset Device command to the command ring */ + spin_lock_irqsave(&xhci->lock, flags); + reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; + list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); + ret = xhci_queue_reset_device(xhci, slot_id); + if (ret) { + xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); + list_del(&reset_device_cmd->cmd_list); + spin_unlock_irqrestore(&xhci->lock, flags); + goto command_cleanup; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + /* Wait for the Reset Device command to finish */ + timeleft = wait_for_completion_interruptible_timeout( + reset_device_cmd->completion, + USB_CTRL_SET_TIMEOUT); + if (timeleft <= 0) { + xhci_warn(xhci, "%s while waiting for reset device command\n", + timeleft == 0 ? "Timeout" : "Signal"); + spin_lock_irqsave(&xhci->lock, flags); + /* The timeout might have raced with the event ring handler, so + * only delete from the list if the item isn't poisoned. + */ + if (reset_device_cmd->cmd_list.next != LIST_POISON1) + list_del(&reset_device_cmd->cmd_list); + spin_unlock_irqrestore(&xhci->lock, flags); + ret = -ETIME; + goto command_cleanup; + } + + /* The Reset Device command can't fail, according to the 0.95/0.96 spec, + * unless we tried to reset a slot ID that wasn't enabled, + * or the device wasn't in the addressed or configured state. 
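+	 * (In those cases a 0.95-based host reports COMP_EBADSLT and a
+	 * 0.96-based host reports COMP_CTX_STATE; the switch below treats both
+	 * as success and leaves the device rings alone.)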
+ */ + ret = reset_device_cmd->status; + switch (ret) { + case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ + case COMP_CTX_STATE: /* 0.96 completion code for same thing */ + xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", + slot_id, + xhci_get_slot_state(xhci, virt_dev->out_ctx)); + xhci_info(xhci, "Not freeing device rings.\n"); + /* Don't treat this as an error. May change my mind later. */ + ret = 0; + goto command_cleanup; + case COMP_SUCCESS: + xhci_dbg(xhci, "Successful reset device command.\n"); + break; + default: + if (xhci_is_vendor_info_code(xhci, ret)) + break; + xhci_warn(xhci, "Unknown completion code %u for " + "reset device command.\n", ret); + ret = -EINVAL; + goto command_cleanup; + } + + /* Everything but endpoint 0 is disabled, so free or cache the rings. */ + last_freed_endpoint = 1; + for (i = 1; i < 31; ++i) { + if (!virt_dev->eps[i].ring) + continue; + xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); + last_freed_endpoint = i; + } + xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); + xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); + ret = 0; + +command_cleanup: + xhci_free_command(xhci, reset_device_cmd); + return ret; +} + +/* + * At this point, the struct usb_device is about to go away, the device has + * disconnected, and all traffic has been stopped and the endpoints have been + * disabled. Free any HC data structures associated with that device. + */ +void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_virt_device *virt_dev; + unsigned long flags; + u32 state; + int i; + + if (udev->slot_id == 0) + return; + virt_dev = xhci->devs[udev->slot_id]; + if (!virt_dev) + return; + + /* Stop any wayward timer functions (which may grab the lock) */ + for (i = 0; i < 31; ++i) { + virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; + del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); + } + + spin_lock_irqsave(&xhci->lock, flags); + /* Don't disable the slot if the host controller is dead. */ + state = xhci_readl(xhci, &xhci->op_regs->status); + if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { + xhci_free_virt_device(xhci, udev->slot_id); + spin_unlock_irqrestore(&xhci->lock, flags); + return; + } + + if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); + return; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + /* + * Event command completion handler will free any data structures + * associated with the slot. XXX Can free sleep? + */ +} + +/* + * Returns 0 if the xHC ran out of device slots, the Enable Slot command + * timed out, or allocating memory failed. Returns 1 on success. + */ +int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + unsigned long flags; + int timeleft; + int ret; + + spin_lock_irqsave(&xhci->lock, flags); + ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); + return 0; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + /* XXX: how much time for xHC slot assignment? 
*/ + timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, + USB_CTRL_SET_TIMEOUT); + if (timeleft <= 0) { + xhci_warn(xhci, "%s while waiting for a slot\n", + timeleft == 0 ? "Timeout" : "Signal"); + /* FIXME cancel the enable slot request */ + return 0; + } + + if (!xhci->slot_id) { + xhci_err(xhci, "Error while assigning device slot ID\n"); + return 0; + } + /* xhci_alloc_virt_device() does not touch rings; no need to lock */ + if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) { + /* Disable slot, if we can do it without mem alloc */ + xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); + spin_lock_irqsave(&xhci->lock, flags); + if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + return 0; + } + udev->slot_id = xhci->slot_id; + /* Is this a LS or FS device under a HS hub? */ + /* Hub or peripherial? */ + return 1; +} + +/* + * Issue an Address Device command (which will issue a SetAddress request to + * the device). + * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so + * we should only issue and wait on one address command at the same time. + * + * We add one to the device address issued by the hardware because the USB core + * uses address 1 for the root hubs (even though they're not really devices). + */ +int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) +{ + unsigned long flags; + int timeleft; + struct xhci_virt_device *virt_dev; + int ret = 0; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_slot_ctx *slot_ctx; + struct xhci_input_control_ctx *ctrl_ctx; + u64 temp_64; + + if (!udev->slot_id) { + xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); + return -EINVAL; + } + + virt_dev = xhci->devs[udev->slot_id]; + + /* If this is a Set Address to an unconfigured device, setup ep 0 */ + if (!udev->config) + xhci_setup_addressable_virt_dev(xhci, udev); + /* Otherwise, assume the core has the device configured how it wants */ + xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); + xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); + + spin_lock_irqsave(&xhci->lock, flags); + ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, + udev->slot_id); + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); + return ret; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ + timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, + USB_CTRL_SET_TIMEOUT); + /* FIXME: From section 4.3.4: "Software shall be responsible for timing + * the SetAddress() "recovery interval" required by USB and aborting the + * command on a timeout. + */ + if (timeleft <= 0) { + xhci_warn(xhci, "%s while waiting for a slot\n", + timeleft == 0 ? 
"Timeout" : "Signal"); + /* FIXME cancel the address device command */ + return -ETIME; + } + + switch (virt_dev->cmd_status) { + case COMP_CTX_STATE: + case COMP_EBADSLT: + xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n", + udev->slot_id); + ret = -EINVAL; + break; + case COMP_TX_ERR: + dev_warn(&udev->dev, "Device not responding to set address.\n"); + ret = -EPROTO; + break; + case COMP_SUCCESS: + xhci_dbg(xhci, "Successful Address Device command\n"); + break; + default: + xhci_err(xhci, "ERROR: unexpected command completion " + "code 0x%x.\n", virt_dev->cmd_status); + xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); + xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); + ret = -EINVAL; + break; + } + if (ret) { + return ret; + } + temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); + xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); + xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", + udev->slot_id, + &xhci->dcbaa->dev_context_ptrs[udev->slot_id], + (unsigned long long) + xhci->dcbaa->dev_context_ptrs[udev->slot_id]); + xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", + (unsigned long long)virt_dev->out_ctx->dma); + xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); + xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); + xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); + xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); + /* + * USB core uses address 1 for the roothubs, so we add one to the + * address given back to us by the HC. + */ + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); + udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1; + /* Zero the input context control for later use */ + ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); + ctrl_ctx->add_flags = 0; + ctrl_ctx->drop_flags = 0; + + xhci_dbg(xhci, "Device address = %d\n", udev->devnum); + /* XXX Meh, not sure if anyone else but choose_address uses this. */ + set_bit(udev->devnum, udev->bus->devmap.devicemap); + + return 0; +} + +/* Once a hub descriptor is fetched for a device, we need to update the xHC's + * internal data structures for the device. + */ +int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, + struct usb_tt *tt, gfp_t mem_flags) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_virt_device *vdev; + struct xhci_command *config_cmd; + struct xhci_input_control_ctx *ctrl_ctx; + struct xhci_slot_ctx *slot_ctx; + unsigned long flags; + unsigned think_time; + int ret; + + /* Ignore root hubs */ + if (!hdev->parent) + return 0; + + vdev = xhci->devs[hdev->slot_id]; + if (!vdev) { + xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); + return -EINVAL; + } + config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); + if (!config_cmd) { + xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); + return -ENOMEM; + } + + spin_lock_irqsave(&xhci->lock, flags); + xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); + ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); + ctrl_ctx->add_flags |= SLOT_FLAG; + slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); + slot_ctx->dev_info |= DEV_HUB; + if (tt->multi) + slot_ctx->dev_info |= DEV_MTT; + if (xhci->hci_version > 0x95) { + xhci_dbg(xhci, "xHCI version %x needs hub " + "TT think time and number of ports\n", + (unsigned int) xhci->hci_version); + slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild); + /* Set TT think time - convert from ns to FS bit times. 
+ * 0 = 8 FS bit times, 1 = 16 FS bit times, + * 2 = 24 FS bit times, 3 = 32 FS bit times. + */ + think_time = tt->think_time; + if (think_time != 0) + think_time = (think_time / 666) - 1; + slot_ctx->tt_info |= TT_THINK_TIME(think_time); + } else { + xhci_dbg(xhci, "xHCI version %x doesn't need hub " + "TT think time or number of ports\n", + (unsigned int) xhci->hci_version); + } + slot_ctx->dev_state = 0; + spin_unlock_irqrestore(&xhci->lock, flags); + + xhci_dbg(xhci, "Set up %s for hub device.\n", + (xhci->hci_version > 0x95) ? + "configure endpoint" : "evaluate context"); + xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); + xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); + + /* Issue and wait for the configure endpoint or + * evaluate context command. + */ + if (xhci->hci_version > 0x95) + ret = xhci_configure_endpoint(xhci, hdev, config_cmd, + false, false); + else + ret = xhci_configure_endpoint(xhci, hdev, config_cmd, + true, false); + + xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); + xhci_dbg_ctx(xhci, vdev->out_ctx, 0); + + xhci_free_command(xhci, config_cmd); + return ret; +} + +int xhci_get_frame(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + /* EHCI mods by the periodic size. Why? */ + return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3; +} + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_LICENSE("GPL"); + +static int __init xhci_hcd_init(void) +{ +#ifdef CONFIG_PCI + int retval = 0; + + retval = xhci_register_pci(); + + if (retval < 0) { + printk(KERN_DEBUG "Problem registering PCI driver."); + return retval; + } +#endif + /* + * Check the compiler generated sizes of structures that must be laid + * out in specific ways for hardware access. + */ + BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); + BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); + BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); + /* xhci_device_control has eight fields, and also + * embeds one xhci_slot_ctx and 31 xhci_ep_ctx + */ + BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); + BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); + BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); + BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8); + BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); + /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ + BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); + BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); + return 0; +} +module_init(xhci_hcd_init); + +static void __exit xhci_hcd_cleanup(void) +{ +#ifdef CONFIG_PCI + xhci_unregister_pci(); +#endif +} +module_exit(xhci_hcd_cleanup); -- cgit v1.2.3 From 1d68064a7d80da4a7334cab0356162e36229c1a1 Mon Sep 17 00:00:00 2001 From: Andiry Xu Date: Fri, 12 Mar 2010 17:10:04 +0800 Subject: USB: xHCI: re-initialize cmd_completion When a signal interrupts a Configure Endpoint command, the cmd_completion used in xhci_configure_endpoint() is not re-initialized and the wait_for_completion_interruptible_timeout() will return failure. Initialize cmd_completion in xhci_configure_endpoint(). 
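A minimal sketch of the completion re-arm pattern the fix relies on; the names below (demo_ctrl, demo_issue_cmd) are invented for illustration and are not part of the xHCI driver:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    struct demo_ctrl {
            struct completion cmd_done;     /* reused for every command */
    };

    static int demo_issue_cmd(struct demo_ctrl *ctrl)
    {
            long t;

            /* Re-arm before each use, so state left over from an earlier,
             * signal-interrupted command cannot confuse the next wait. */
            init_completion(&ctrl->cmd_done);

            /* ... queue the command to the hardware here ... */

            t = wait_for_completion_interruptible_timeout(&ctrl->cmd_done,
                                                          msecs_to_jiffies(5000));
            if (t == 0)
                    return -ETIME;          /* timed out */
            if (t < 0)
                    return t;               /* -ERESTARTSYS: hit by a signal */
            return 0;                       /* completed normally */
    }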
Signed-off-by: Andiry Xu Signed-off-by: Sarah Sharp Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 4cb69e0af834..492a61c2c79d 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1173,6 +1173,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, cmd_completion = &virt_dev->cmd_completion; cmd_status = &virt_dev->cmd_status; } + init_completion(cmd_completion); if (!ctx_change) ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, -- cgit v1.2.3 From dee5658b482e9e2ac7d6205dc876fc11d4008138 Mon Sep 17 00:00:00 2001 From: Daniel Sangorrin Date: Thu, 11 Mar 2010 14:10:58 -0800 Subject: USB: serial: ftdi: add CONTEC vendor and product id This is a patch to ftdi_sio_ids.h and ftdi_sio.c that adds identifiers for CONTEC USB serial converter. I tested it with the device COM-1(USB)H [akpm@linux-foundation.org: keep the VIDs sorted a bit] Signed-off-by: Daniel Sangorrin Cc: Andreas Mohr Cc: Radek Liboska Cc: stable Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/ftdi_sio.c | 1 + drivers/usb/serial/ftdi_sio_ids.h | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 6af0dfa5f5ac..6fc09dc3b53e 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -658,6 +658,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) }, { USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) }, { USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) }, + { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 0727e198503e..75482cbc3998 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -500,6 +500,13 @@ #define CONTEC_VID 0x06CE /* Vendor ID */ #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */ +/* + * Contec products (http://www.contec.com) + * Submitted by Daniel Sangorrin + */ +#define CONTEC_VID 0x06CE /* Vendor ID */ +#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */ + /* * Definitions for B&B Electronics products. 
*/ -- cgit v1.2.3 From eaff4cdc978f414cf7b5441a333de3070d80e9c7 Mon Sep 17 00:00:00 2001 From: Nathaniel McCallum Date: Thu, 11 Mar 2010 13:09:24 -0500 Subject: USB: option: fix incorrect manufacturer name in usb/serial/option: MAXON->CMOTECH Signed-off-by: Nathaniel McCallum Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 3ab1a0440d81..f19fd3335cdb 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -288,7 +288,7 @@ static int option_resume(struct usb_serial *serial); #define QUALCOMM_VENDOR_ID 0x05C6 -#define MAXON_VENDOR_ID 0x16d8 +#define CMOTECH_VENDOR_ID 0x16d8 #define TELIT_VENDOR_ID 0x1bc7 #define TELIT_PRODUCT_UC864E 0x1003 @@ -548,7 +548,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ - { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ + { USB_DEVICE(CMOTECH_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ -- cgit v1.2.3 From bb73ed2a268a29ab1b7d8cc50b5f248578e7e188 Mon Sep 17 00:00:00 2001 From: Nathaniel McCallum Date: Thu, 11 Mar 2010 13:01:17 -0500 Subject: USB: option: move hardcoded PID to a macro in usb/serial/option Signed-off-by: Nathaniel McCallum Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index f19fd3335cdb..132ad930e6b2 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -289,6 +289,7 @@ static int option_resume(struct usb_serial *serial); #define QUALCOMM_VENDOR_ID 0x05C6 #define CMOTECH_VENDOR_ID 0x16d8 +#define CMOTECH_PRODUCT_6280 0x6280 #define TELIT_VENDOR_ID 0x1bc7 #define TELIT_PRODUCT_UC864E 0x1003 @@ -548,7 +549,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ - { USB_DEVICE(CMOTECH_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ -- cgit v1.2.3 From 3b04872aa75006e2a4adaaec21e9c9ede8b8ad9d Mon Sep 17 00:00:00 2001 From: Nathaniel McCallum Date: Thu, 11 Mar 2010 13:09:26 -0500 Subject: USB: option: add support for a new CMOTECH device to usb/serial/option Signed-off-by: Nathaniel McCallum Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 132ad930e6b2..3af1eb8aa635 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -289,6 +289,7 @@ static int option_resume(struct usb_serial *serial); #define 
QUALCOMM_VENDOR_ID 0x05C6 #define CMOTECH_VENDOR_ID 0x16d8 +#define CMOTECH_PRODUCT_6008 0x6008 #define CMOTECH_PRODUCT_6280 0x6280 #define TELIT_VENDOR_ID 0x1bc7 @@ -550,6 +551,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ + { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ -- cgit v1.2.3 From 1e63ef0e0c2cfb5deb9331420c9857fbe04bea73 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Fri, 12 Mar 2010 11:27:21 +0100 Subject: USB: Fix documentation for avoid_reset_quirk The name used in the documentation doesn't match reality. Signed-off-by: Oliver Neukum Signed-off-by: Greg Kroah-Hartman --- Documentation/ABI/testing/sysfs-bus-usb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb index a986e9bbba3d..bcebb9eaedce 100644 --- a/Documentation/ABI/testing/sysfs-bus-usb +++ b/Documentation/ABI/testing/sysfs-bus-usb @@ -160,7 +160,7 @@ Description: match the driver to the device. For example: # echo "046d c315" > /sys/bus/usb/drivers/foo/remove_id -What: /sys/bus/usb/device/.../avoid_reset +What: /sys/bus/usb/device/.../avoid_reset_quirk Date: December 2009 Contact: Oliver Neukum Description: -- cgit v1.2.3 From fa7bf3424ead0a496f5176abb3253b8176bb2935 Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Thu, 11 Mar 2010 15:06:54 -0700 Subject: usb/gadget: fix compile error on r8a66597-udc.c C file uses IS_ERR and PTR_ERR, but doesn't include Signed-off-by: Grant Likely Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/r8a66597-udc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c index 8b45145b9136..5e13d23b5f0c 100644 --- a/drivers/usb/gadget/r8a66597-udc.c +++ b/drivers/usb/gadget/r8a66597-udc.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3 From 9957dd97ec5e98dd334f87ade1d9a0b24d1f86eb Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Fri, 12 Mar 2010 10:35:20 +0200 Subject: usb: musb: Fix compile error for omaps for musb_hdrc CONFIG_ARCH_OMAP34XX is now CONFIG_ARCH_OMAP3. But since drivers/usb/musb/omap2430.c use CONFIG_PM for these registers and functions, do the same for the header. 
Otherwise we get the following for most omap3 defconfigs: drivers/usb/musb/omap2430.c:261: error: expected identifier or '(' before 'do' drivers/usb/musb/omap2430.c:261: error: expected identifier or '(' before 'while' drivers/usb/musb/omap2430.c:268: error: expected identifier or '(' before 'do' drivers/usb/musb/omap2430.c:268: error: expected identifier or '(' before 'while' Signed-off-by: Tony Lindgren Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/musb/musb_core.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index d849fb81c131..cd9f4a9a06c6 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -469,7 +469,7 @@ struct musb_csr_regs { struct musb_context_registers { -#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430) +#ifdef CONFIG_PM u32 otg_sysconfig, otg_forcestandby; #endif u8 power; @@ -483,7 +483,7 @@ struct musb_context_registers { struct musb_csr_regs index_regs[MUSB_C_NUM_EPS]; }; -#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430) +#ifdef CONFIG_PM extern void musb_platform_save_context(struct musb *musb, struct musb_context_registers *musb_context); extern void musb_platform_restore_context(struct musb *musb, -- cgit v1.2.3 From adb3ee421d6d39fbfadadf7093a587461ac4597e Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Fri, 12 Mar 2010 10:27:21 +0200 Subject: usb: musb: abstract out ULPI_BUSCONTROL register reads/writes The USB PHY on current Blackfin processors is a UTMI+ level 2 PHY. However, it has no ULPI support - so there are no registers at all. That means accesses to ULPI_BUSCONTROL have to be abstracted away like other MUSB registers. This fixes building for Blackfin parts again. 
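The general shape of that abstraction, sketched with invented names (CONFIG_HAS_FOO_REG, FOO_REG, hw_read_foo/hw_write_foo) rather than the real MUSB symbols:

    #include <linux/io.h>
    #include <linux/types.h>

    #ifdef CONFIG_HAS_FOO_REG
    #define FOO_REG 0x70    /* invented offset */

    static inline u8 hw_read_foo(void __iomem *base)
    {
            return readb(base + FOO_REG);
    }

    static inline void hw_write_foo(void __iomem *base, u8 val)
    {
            writeb(val, base + FOO_REG);
    }
    #else
    /* Platform has no such register: reads report 0, writes are dropped,
     * and callers build unchanged. */
    static inline u8 hw_read_foo(void __iomem *base)
    {
            return 0;
    }

    static inline void hw_write_foo(void __iomem *base, u8 val)
    {
    }
    #endif

Callers then use the accessors unconditionally, which is what musb_core.c does with the new musb_read_ulpi_buscontrol()/musb_write_ulpi_buscontrol() helpers in the diff below.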
Signed-off-by: Mike Frysinger Acked-by: Anand Gadiyar Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/musb/musb_core.c | 5 ++--- drivers/usb/musb/musb_regs.h | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index b4bbf8f2c238..e54e468c5672 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2007,7 +2007,6 @@ bad_config: /* host side needs more setup */ if (is_host_enabled(musb)) { struct usb_hcd *hcd = musb_to_hcd(musb); - u8 busctl; otg_set_host(musb->xceiv, &hcd->self); @@ -2018,9 +2017,9 @@ bad_config: /* program PHY to use external vBus if required */ if (plat->extvbus) { - busctl = musb_readb(musb->mregs, MUSB_ULPI_BUSCONTROL); + u8 busctl = musb_read_ulpi_buscontrol(musb->mregs); busctl |= MUSB_ULPI_USE_EXTVBUS; - musb_writeb(musb->mregs, MUSB_ULPI_BUSCONTROL, busctl); + musb_write_ulpi_buscontrol(musb->mregs, busctl); } } diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h index 8d8062b10e2f..327d0edd210e 100644 --- a/drivers/usb/musb/musb_regs.h +++ b/drivers/usb/musb/musb_regs.h @@ -326,6 +326,11 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off) musb_writew(mbase, MUSB_RXFIFOADD, c_off); } +static inline void musb_write_ulpi_buscontrol(void __iomem *mbase, u8 val) +{ + musb_writeb(mbase, MUSB_ULPI_BUSCONTROL, val); +} + static inline u8 musb_read_txfifosz(void __iomem *mbase) { return musb_readb(mbase, MUSB_TXFIFOSZ); @@ -346,6 +351,11 @@ static inline u16 musb_read_rxfifoadd(void __iomem *mbase) return musb_readw(mbase, MUSB_RXFIFOADD); } +static inline u8 musb_read_ulpi_buscontrol(void __iomem *mbase) +{ + return musb_readb(mbase, MUSB_ULPI_BUSCONTROL); +} + static inline u8 musb_read_configdata(void __iomem *mbase) { musb_writeb(mbase, MUSB_INDEX, 0); @@ -510,6 +520,10 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off) { } +static inline void musb_write_ulpi_buscontrol(void __iomem *mbase, u8 val) +{ +} + static inline u8 musb_read_txfifosz(void __iomem *mbase) { } @@ -526,6 +540,11 @@ static inline u16 musb_read_rxfifoadd(void __iomem *mbase) { } +static inline u8 musb_read_ulpi_buscontrol(void __iomem *mbase) +{ + return 0; +} + static inline u8 musb_read_configdata(void __iomem *mbase) { return 0; -- cgit v1.2.3 From 7f4bca4049941ba8dac35775fe462d4ef9f0dce4 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Fri, 12 Mar 2010 10:27:23 +0200 Subject: USB: musb: fix warnings in Blackfin regs The recent commit "usb: musb: Add context save and restore support" added some stubs for the Blackfin code so things would compile, but it also added a bunch of warnings due to missing return statements. 
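For context, the warning comes from non-void inline stubs that fall off the end of the function; a stub must still return a value, e.g. (invented name):

    static inline u8 demo_read_reg(void __iomem *mbase)
    {
            return 0;       /* no such register on this platform */
    }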
Signed-off-by: Mike Frysinger Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/musb/musb_regs.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h index 327d0edd210e..fa55aacc385d 100644 --- a/drivers/usb/musb/musb_regs.h +++ b/drivers/usb/musb/musb_regs.h @@ -526,18 +526,22 @@ static inline void musb_write_ulpi_buscontrol(void __iomem *mbase, u8 val) static inline u8 musb_read_txfifosz(void __iomem *mbase) { + return 0; } static inline u16 musb_read_txfifoadd(void __iomem *mbase) { + return 0; } static inline u8 musb_read_rxfifosz(void __iomem *mbase) { + return 0; } static inline u16 musb_read_rxfifoadd(void __iomem *mbase) { + return 0; } static inline u8 musb_read_ulpi_buscontrol(void __iomem *mbase) @@ -596,22 +600,27 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum, static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum) { + return 0; } static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum) { + return 0; } static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum) { + return 0; } static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum) { + return 0; } static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum) { + return 0; } static inline void musb_read_txhubport(void __iomem *mbase, u8 epnum) -- cgit v1.2.3 From aa4714560b4ea359bb7830188ebd06bce71bcdea Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Fri, 12 Mar 2010 10:27:24 +0200 Subject: usb: musb: core: declare mbase only where it's used ... and avoid a compilation if we disable host side of musb. Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/musb/musb_core.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index e54e468c5672..0e8b8ab1d168 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -379,7 +379,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, u8 devctl, u8 power) { irqreturn_t handled = IRQ_NONE; - void __iomem *mbase = musb->mregs; DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl, int_usb); @@ -394,6 +393,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, if (devctl & MUSB_DEVCTL_HM) { #ifdef CONFIG_USB_MUSB_HDRC_HCD + void __iomem *mbase = musb->mregs; + switch (musb->xceiv->state) { case OTG_STATE_A_SUSPEND: /* remote wakeup? 
later, GetPortStatus @@ -471,6 +472,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, #ifdef CONFIG_USB_MUSB_HDRC_HCD /* see manual for the order of the tests */ if (int_usb & MUSB_INTR_SESSREQ) { + void __iomem *mbase = musb->mregs; + DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb)); /* IRQ arrives from ID pin sense or (later, if VBUS power @@ -519,6 +522,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, case OTG_STATE_A_WAIT_BCON: case OTG_STATE_A_WAIT_VRISE: if (musb->vbuserr_retry) { + void __iomem *mbase = musb->mregs; + musb->vbuserr_retry--; ignore = 1; devctl |= MUSB_DEVCTL_SESSION; @@ -622,6 +627,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, if (int_usb & MUSB_INTR_CONNECT) { struct usb_hcd *hcd = musb_to_hcd(musb); + void __iomem *mbase = musb->mregs; handled = IRQ_HANDLED; musb->is_active = 1; -- cgit v1.2.3 From 860e41a71c1731e79e1920dc42676bafc925af5e Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Sat, 27 Feb 2010 20:54:24 +0100 Subject: usb: cdc-wdm: Fix race between write and disconnect Unify mutexes to fix a race between write and disconnect and shift the test for disconnection to always report it. Signed-off-by: Oliver Neukum Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/cdc-wdm.c | 84 ++++++++++++++++++++++++--------------------- 1 file changed, 45 insertions(+), 39 deletions(-) diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 18aafcb08fc8..cf1c5fb918dc 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -87,9 +87,7 @@ struct wdm_device { int count; dma_addr_t shandle; dma_addr_t ihandle; - struct mutex wlock; - struct mutex rlock; - struct mutex plock; + struct mutex lock; wait_queue_head_t wait; struct work_struct rxwork; int werr; @@ -305,14 +303,38 @@ static ssize_t wdm_write if (we < 0) return -EIO; - r = mutex_lock_interruptible(&desc->wlock); /* concurrent writes */ + desc->outbuf = buf = kmalloc(count, GFP_KERNEL); + if (!buf) { + rv = -ENOMEM; + goto outnl; + } + + r = copy_from_user(buf, buffer, count); + if (r > 0) { + kfree(buf); + rv = -EFAULT; + goto outnl; + } + + /* concurrent writes and disconnect */ + r = mutex_lock_interruptible(&desc->lock); rv = -ERESTARTSYS; - if (r) + if (r) { + kfree(buf); goto outnl; + } + + if (test_bit(WDM_DISCONNECTING, &desc->flags)) { + kfree(buf); + rv = -ENODEV; + goto outnp; + } r = usb_autopm_get_interface(desc->intf); - if (r < 0) + if (r < 0) { + kfree(buf); goto outnp; + } if (!file->f_flags && O_NONBLOCK) r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE, @@ -320,24 +342,8 @@ static ssize_t wdm_write else if (test_bit(WDM_IN_USE, &desc->flags)) r = -EAGAIN; - if (r < 0) - goto out; - - if (test_bit(WDM_DISCONNECTING, &desc->flags)) { - rv = -ENODEV; - goto out; - } - - desc->outbuf = buf = kmalloc(count, GFP_KERNEL); - if (!buf) { - rv = -ENOMEM; - goto out; - } - - r = copy_from_user(buf, buffer, count); - if (r > 0) { + if (r < 0) { kfree(buf); - rv = -EFAULT; goto out; } @@ -374,7 +380,7 @@ static ssize_t wdm_write out: usb_autopm_put_interface(desc->intf); outnp: - mutex_unlock(&desc->wlock); + mutex_unlock(&desc->lock); outnl: return rv < 0 ? 
rv : count; } @@ -387,7 +393,7 @@ static ssize_t wdm_read struct wdm_device *desc = file->private_data; - rv = mutex_lock_interruptible(&desc->rlock); /*concurrent reads */ + rv = mutex_lock_interruptible(&desc->lock); /*concurrent reads */ if (rv < 0) return -ERESTARTSYS; @@ -465,7 +471,7 @@ retry: rv = cntr; err: - mutex_unlock(&desc->rlock); + mutex_unlock(&desc->lock); if (rv < 0 && rv != -EAGAIN) dev_err(&desc->intf->dev, "wdm_read: exit error\n"); return rv; @@ -533,7 +539,7 @@ static int wdm_open(struct inode *inode, struct file *file) } intf->needs_remote_wakeup = 1; - mutex_lock(&desc->plock); + mutex_lock(&desc->lock); if (!desc->count++) { rv = usb_submit_urb(desc->validity, GFP_KERNEL); if (rv < 0) { @@ -544,7 +550,7 @@ static int wdm_open(struct inode *inode, struct file *file) } else { rv = 0; } - mutex_unlock(&desc->plock); + mutex_unlock(&desc->lock); usb_autopm_put_interface(desc->intf); out: mutex_unlock(&wdm_mutex); @@ -556,9 +562,9 @@ static int wdm_release(struct inode *inode, struct file *file) struct wdm_device *desc = file->private_data; mutex_lock(&wdm_mutex); - mutex_lock(&desc->plock); + mutex_lock(&desc->lock); desc->count--; - mutex_unlock(&desc->plock); + mutex_unlock(&desc->lock); if (!desc->count) { dev_dbg(&desc->intf->dev, "wdm_release: cleanup"); @@ -655,9 +661,7 @@ next_desc: desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL); if (!desc) goto out; - mutex_init(&desc->wlock); - mutex_init(&desc->rlock); - mutex_init(&desc->plock); + mutex_init(&desc->lock); spin_lock_init(&desc->iuspin); init_waitqueue_head(&desc->wait); desc->wMaxCommand = maxcom; @@ -772,7 +776,9 @@ static void wdm_disconnect(struct usb_interface *intf) clear_bit(WDM_IN_USE, &desc->flags); spin_unlock_irqrestore(&desc->iuspin, flags); cancel_work_sync(&desc->rxwork); + mutex_lock(&desc->lock); kill_urbs(desc); + mutex_unlock(&desc->lock); wake_up_all(&desc->wait); if (!desc->count) cleanup(desc); @@ -786,7 +792,7 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message) dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor); - mutex_lock(&desc->plock); + mutex_lock(&desc->lock); #ifdef CONFIG_PM if ((message.event & PM_EVENT_AUTO) && test_bit(WDM_IN_USE, &desc->flags)) { @@ -798,7 +804,7 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message) #ifdef CONFIG_PM } #endif - mutex_unlock(&desc->plock); + mutex_unlock(&desc->lock); return rv; } @@ -821,9 +827,9 @@ static int wdm_resume(struct usb_interface *intf) int rv; dev_dbg(&desc->intf->dev, "wdm%d_resume\n", intf->minor); - mutex_lock(&desc->plock); + mutex_lock(&desc->lock); rv = recover_from_urb_loss(desc); - mutex_unlock(&desc->plock); + mutex_unlock(&desc->lock); return rv; } @@ -831,7 +837,7 @@ static int wdm_pre_reset(struct usb_interface *intf) { struct wdm_device *desc = usb_get_intfdata(intf); - mutex_lock(&desc->plock); + mutex_lock(&desc->lock); return 0; } @@ -841,7 +847,7 @@ static int wdm_post_reset(struct usb_interface *intf) int rv; rv = recover_from_urb_loss(desc); - mutex_unlock(&desc->plock); + mutex_unlock(&desc->lock); return 0; } -- cgit v1.2.3 From 922a5eadd5a3aa0b806be0c18694b618d41d0784 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Sat, 27 Feb 2010 20:54:59 +0100 Subject: usb: cdc-wdm: Fix race between autosuspend and reading from the device While an available response is read the device must not be autosuspended. This requires a flag dedicated to that purpose. 
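A reduced sketch of that veto logic, with invented names (demo_dev, DEMO_RESPONDING, demo_suspend); the real driver additionally checks its WDM_IN_USE flag:

    #include <linux/usb.h>
    #include <linux/bitops.h>

    #define DEMO_RESPONDING 0               /* a response URB is outstanding */

    struct demo_dev {
            unsigned long flags;
    };

    static void demo_kill_urbs(struct demo_dev *dev) { /* usb_kill_urb()s */ }

    static int demo_suspend(struct usb_interface *intf, pm_message_t message)
    {
            struct demo_dev *dev = usb_get_intfdata(intf);

            /* Refuse runtime (automatic) suspend while a response is in
             * flight; system sleep still goes ahead. */
            if ((message.event & PM_EVENT_AUTO) &&
                test_bit(DEMO_RESPONDING, &dev->flags))
                    return -EBUSY;

            demo_kill_urbs(dev);            /* stop I/O before the bus idles */
            return 0;
    }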
Signed-off-by: Oliver Neukum Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/cdc-wdm.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index cf1c5fb918dc..940b17af162c 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -52,6 +52,7 @@ MODULE_DEVICE_TABLE (usb, wdm_ids); #define WDM_READ 4 #define WDM_INT_STALL 5 #define WDM_POLL_RUNNING 6 +#define WDM_RESPONDING 7 #define WDM_MAX 16 @@ -115,21 +116,22 @@ static void wdm_in_callback(struct urb *urb) int status = urb->status; spin_lock(&desc->iuspin); + clear_bit(WDM_RESPONDING, &desc->flags); if (status) { switch (status) { case -ENOENT: dev_dbg(&desc->intf->dev, "nonzero urb status received: -ENOENT"); - break; + goto skip_error; case -ECONNRESET: dev_dbg(&desc->intf->dev, "nonzero urb status received: -ECONNRESET"); - break; + goto skip_error; case -ESHUTDOWN: dev_dbg(&desc->intf->dev, "nonzero urb status received: -ESHUTDOWN"); - break; + goto skip_error; case -EPIPE: dev_err(&desc->intf->dev, "nonzero urb status received: -EPIPE\n"); @@ -145,6 +147,7 @@ static void wdm_in_callback(struct urb *urb) desc->reslength = urb->actual_length; memmove(desc->ubuf + desc->length, desc->inbuf, desc->reslength); desc->length += desc->reslength; +skip_error: wake_up(&desc->wait); set_bit(WDM_READ, &desc->flags); @@ -227,6 +230,7 @@ static void wdm_int_callback(struct urb *urb) desc->response->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; spin_lock(&desc->iuspin); clear_bit(WDM_READ, &desc->flags); + set_bit(WDM_RESPONDING, &desc->flags); if (!test_bit(WDM_DISCONNECTING, &desc->flags)) { rv = usb_submit_urb(desc->response, GFP_ATOMIC); dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d", @@ -234,6 +238,7 @@ static void wdm_int_callback(struct urb *urb) } spin_unlock(&desc->iuspin); if (rv < 0) { + clear_bit(WDM_RESPONDING, &desc->flags); if (rv == -EPERM) return; if (rv == -ENOMEM) { @@ -795,7 +800,8 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message) mutex_lock(&desc->lock); #ifdef CONFIG_PM if ((message.event & PM_EVENT_AUTO) && - test_bit(WDM_IN_USE, &desc->flags)) { + (test_bit(WDM_IN_USE, &desc->flags) + || test_bit(WDM_RESPONDING, &desc->flags))) { rv = -EBUSY; } else { #endif -- cgit v1.2.3 From d855fe2e9c19edaa47baba0e7f95e17f7a24dba8 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Sat, 27 Feb 2010 20:55:26 +0100 Subject: usb: cdc-wdm: Fix race between disconnect and debug messages dev_dbg() and dev_err() cannot be used to report failures that may have been caused by a device's removal Signed-off-by: Oliver Neukum Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/cdc-wdm.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 940b17af162c..72e2eb030459 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -435,11 +435,8 @@ retry: spin_lock_irq(&desc->iuspin); if (desc->rerr) { /* read completed, error happened */ - int t = desc->rerr; desc->rerr = 0; spin_unlock_irq(&desc->iuspin); - dev_err(&desc->intf->dev, - "reading had resulted in %d\n", t); rv = -EIO; goto err; } @@ -477,8 +474,6 @@ retry: err: mutex_unlock(&desc->lock); - if (rv < 0 && rv != -EAGAIN) - dev_err(&desc->intf->dev, "wdm_read: exit error\n"); return rv; } -- cgit v1.2.3 From beb1d35f1690fe27694472a010a8e4a9ae11cc50 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Sat, 27 Feb 2010 20:55:52 +0100 Subject: usb: cdc-wdm: Fix submission of 
URB after suspension There's a window under which cdc-wdm may submit an URB to a device about to be suspended. This introduces a flag to prevent it. Signed-off-by: Oliver Neukum Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/cdc-wdm.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 72e2eb030459..a6b5e9fd0714 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -53,7 +53,7 @@ MODULE_DEVICE_TABLE (usb, wdm_ids); #define WDM_INT_STALL 5 #define WDM_POLL_RUNNING 6 #define WDM_RESPONDING 7 - +#define WDM_SUSPENDING 8 #define WDM_MAX 16 @@ -231,7 +231,8 @@ static void wdm_int_callback(struct urb *urb) spin_lock(&desc->iuspin); clear_bit(WDM_READ, &desc->flags); set_bit(WDM_RESPONDING, &desc->flags); - if (!test_bit(WDM_DISCONNECTING, &desc->flags)) { + if (!test_bit(WDM_DISCONNECTING, &desc->flags) + && !test_bit(WDM_SUSPENDING, &desc->flags)) { rv = usb_submit_urb(desc->response, GFP_ATOMIC); dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d", __func__, rv); @@ -800,6 +801,7 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message) rv = -EBUSY; } else { #endif + set_bit(WDM_SUSPENDING, &desc->flags); cancel_work_sync(&desc->rxwork); kill_urbs(desc); #ifdef CONFIG_PM @@ -830,6 +832,7 @@ static int wdm_resume(struct usb_interface *intf) dev_dbg(&desc->intf->dev, "wdm%d_resume\n", intf->minor); mutex_lock(&desc->lock); rv = recover_from_urb_loss(desc); + clear_bit(WDM_SUSPENDING, &desc->flags); mutex_unlock(&desc->lock); return rv; } -- cgit v1.2.3 From 62e6685470fb04fb7688ecef96c39160498721d5 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Sat, 27 Feb 2010 20:56:22 +0100 Subject: usb: cdc-wdm:Fix loss of data due to autosuspend The guarding flag must be set and tested under spinlock and cleared before the URBs are resubmitted in resume. 
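The ordering constraint reduces to this sketch (names invented): the guard flag goes up under the same spinlock the URB completion handler takes, and comes down before anything is resubmitted.

    #include <linux/spinlock.h>
    #include <linux/bitops.h>

    #define DEMO_SUSPENDING 0

    struct demo_dev {
            spinlock_t lock;                /* also taken by the URB callback */
            unsigned long flags;
    };

    static void demo_kill_urbs(struct demo_dev *dev) { /* usb_kill_urb()s */ }
    static int demo_resubmit_urbs(struct demo_dev *dev) { return 0; }

    static int demo_suspend(struct demo_dev *dev)
    {
            spin_lock_irq(&dev->lock);
            set_bit(DEMO_SUSPENDING, &dev->flags);  /* callback now backs off */
            spin_unlock_irq(&dev->lock);

            demo_kill_urbs(dev);                    /* only after the flag is visible */
            return 0;
    }

    static int demo_resume(struct demo_dev *dev)
    {
            clear_bit(DEMO_SUSPENDING, &dev->flags);        /* before resubmitting */
            return demo_resubmit_urbs(dev);
    }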
Signed-off-by: Oliver Neukum Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/cdc-wdm.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index a6b5e9fd0714..07c12974fe14 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -794,14 +794,17 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message) dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor); mutex_lock(&desc->lock); + spin_lock_irq(&desc->iuspin); #ifdef CONFIG_PM if ((message.event & PM_EVENT_AUTO) && (test_bit(WDM_IN_USE, &desc->flags) || test_bit(WDM_RESPONDING, &desc->flags))) { + spin_unlock_irq(&desc->iuspin); rv = -EBUSY; } else { #endif set_bit(WDM_SUSPENDING, &desc->flags); + spin_unlock_irq(&desc->iuspin); cancel_work_sync(&desc->rxwork); kill_urbs(desc); #ifdef CONFIG_PM @@ -831,8 +834,8 @@ static int wdm_resume(struct usb_interface *intf) dev_dbg(&desc->intf->dev, "wdm%d_resume\n", intf->minor); mutex_lock(&desc->lock); - rv = recover_from_urb_loss(desc); clear_bit(WDM_SUSPENDING, &desc->flags); + rv = recover_from_urb_loss(desc); mutex_unlock(&desc->lock); return rv; } -- cgit v1.2.3 From d93d16e9aa58887feadd999ea26b7b8139e98b56 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Sat, 27 Feb 2010 20:56:47 +0100 Subject: usb: cdc-wdm: Fix order in disconnect and fix locking - as the callback can schedule work, URBs must be killed first - if the driver causes an autoresume, the caller must handle locking Signed-off-by: Oliver Neukum Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/cdc-wdm.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 07c12974fe14..b57490508860 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -776,9 +776,9 @@ static void wdm_disconnect(struct usb_interface *intf) /* to terminate pending flushes */ clear_bit(WDM_IN_USE, &desc->flags); spin_unlock_irqrestore(&desc->iuspin, flags); - cancel_work_sync(&desc->rxwork); mutex_lock(&desc->lock); kill_urbs(desc); + cancel_work_sync(&desc->rxwork); mutex_unlock(&desc->lock); wake_up_all(&desc->wait); if (!desc->count) @@ -786,6 +786,7 @@ static void wdm_disconnect(struct usb_interface *intf) mutex_unlock(&wdm_mutex); } +#ifdef CONFIG_PM static int wdm_suspend(struct usb_interface *intf, pm_message_t message) { struct wdm_device *desc = usb_get_intfdata(intf); @@ -793,27 +794,30 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message) dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor); - mutex_lock(&desc->lock); + /* if this is an autosuspend the caller does the locking */ + if (!(message.event & PM_EVENT_AUTO)) + mutex_lock(&desc->lock); spin_lock_irq(&desc->iuspin); -#ifdef CONFIG_PM + if ((message.event & PM_EVENT_AUTO) && (test_bit(WDM_IN_USE, &desc->flags) || test_bit(WDM_RESPONDING, &desc->flags))) { spin_unlock_irq(&desc->iuspin); rv = -EBUSY; } else { -#endif + set_bit(WDM_SUSPENDING, &desc->flags); spin_unlock_irq(&desc->iuspin); - cancel_work_sync(&desc->rxwork); + /* callback submits work - order is essential */ kill_urbs(desc); -#ifdef CONFIG_PM + cancel_work_sync(&desc->rxwork); } -#endif - mutex_unlock(&desc->lock); + if (!(message.event & PM_EVENT_AUTO)) + mutex_unlock(&desc->lock); return rv; } +#endif static int recover_from_urb_loss(struct wdm_device *desc) { @@ -827,6 +831,8 @@ static int recover_from_urb_loss(struct wdm_device *desc) } return 
rv; } + +#ifdef CONFIG_PM static int wdm_resume(struct usb_interface *intf) { struct wdm_device *desc = usb_get_intfdata(intf); @@ -839,6 +845,7 @@ static int wdm_resume(struct usb_interface *intf) mutex_unlock(&desc->lock); return rv; } +#endif static int wdm_pre_reset(struct usb_interface *intf) { @@ -862,9 +869,11 @@ static struct usb_driver wdm_driver = { .name = "cdc_wdm", .probe = wdm_probe, .disconnect = wdm_disconnect, +#ifdef CONFIG_PM .suspend = wdm_suspend, .resume = wdm_resume, .reset_resume = wdm_resume, +#endif .pre_reset = wdm_pre_reset, .post_reset = wdm_post_reset, .id_table = wdm_ids, -- cgit v1.2.3 From 338124c1f18c2c737656ac58735f040d90b23d8c Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Sat, 27 Feb 2010 20:57:12 +0100 Subject: usb: cdc-wdm: Fix deadlock between write and resume The new runtime PM scheme allows resume() to have no locks. This fixes the deadlock. Signed-off-by: Oliver Neukum Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/cdc-wdm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index b57490508860..189141ca4e05 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -839,10 +839,10 @@ static int wdm_resume(struct usb_interface *intf) int rv; dev_dbg(&desc->intf->dev, "wdm%d_resume\n", intf->minor); - mutex_lock(&desc->lock); + clear_bit(WDM_SUSPENDING, &desc->flags); rv = recover_from_urb_loss(desc); - mutex_unlock(&desc->lock); + return rv; } #endif -- cgit v1.2.3 From 510607db7e2ad5078c554911418a71b469886076 Mon Sep 17 00:00:00 2001 From: Stefan Schmidt Date: Wed, 3 Mar 2010 19:37:12 +0100 Subject: USB: serial: Fix module name typo for qcaux Kconfig entry. The module is called qcaux and not moto_modem. Also use help instead of ---help-- to be in sync with the other Kconfig entries. Signed-off-by: Stefan Schmidt Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index c78b255e3f83..a0ecb42cb33a 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -474,14 +474,14 @@ config USB_SERIAL_OTI6858 config USB_SERIAL_QCAUX tristate "USB Qualcomm Auxiliary Serial Port Driver" - ---help--- + help Say Y here if you want to use the auxiliary serial ports provided by many modems based on Qualcomm chipsets. These ports often use a proprietary protocol called DM and cannot be used for AT- or PPP-based communication. To compile this driver as a module, choose M here: the - module will be called moto_modem. If unsure, choose N. + module will be called qcaux. If unsure, choose N. 
config USB_SERIAL_QUALCOMM tristate "USB Qualcomm Serial modem" -- cgit v1.2.3 From 33c387529b7931248c6637bf9720ac7504a0b28b Mon Sep 17 00:00:00 2001 From: spark Date: Fri, 5 Mar 2010 14:18:05 +0800 Subject: USB: option.c: Add Pirelli VID/PID and indicate Pirelli's modem interface is 0xff Signed-off-by: spark Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 3af1eb8aa635..950cb311ca94 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -335,6 +335,24 @@ static int option_resume(struct usb_serial *serial); #define ALCATEL_VENDOR_ID 0x1bbb #define ALCATEL_PRODUCT_X060S 0x0000 +#define PIRELLI_VENDOR_ID 0x1266 +#define PIRELLI_PRODUCT_C100_1 0x1002 +#define PIRELLI_PRODUCT_C100_2 0x1003 +#define PIRELLI_PRODUCT_1004 0x1004 +#define PIRELLI_PRODUCT_1005 0x1005 +#define PIRELLI_PRODUCT_1006 0x1006 +#define PIRELLI_PRODUCT_1007 0x1007 +#define PIRELLI_PRODUCT_1008 0x1008 +#define PIRELLI_PRODUCT_1009 0x1009 +#define PIRELLI_PRODUCT_100A 0x100a +#define PIRELLI_PRODUCT_100B 0x100b +#define PIRELLI_PRODUCT_100C 0x100c +#define PIRELLI_PRODUCT_100D 0x100d +#define PIRELLI_PRODUCT_100E 0x100e +#define PIRELLI_PRODUCT_100F 0x100f +#define PIRELLI_PRODUCT_1011 0x1011 +#define PIRELLI_PRODUCT_1012 0x1012 + /* Airplus products */ #define AIRPLUS_VENDOR_ID 0x1011 #define AIRPLUS_PRODUCT_MCD650 0x3198 @@ -679,6 +697,24 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&four_g_w14_blacklist }, { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, + /* Pirelli */ + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1004)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1005)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1006)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1007)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1008)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1009)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100A)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100B) }, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100C) }, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100D) }, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100E) }, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) }, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)}, + { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, + { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); @@ -802,12 +838,19 @@ static int option_probe(struct usb_serial *serial, const struct usb_device_id *id) { struct option_intf_private *data; + /* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */ if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID && serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 && serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8) return -ENODEV; + /* Bandrich modem and AT command interface is 0xff */ + if ((serial->dev->descriptor.idVendor == BANDRICH_VENDOR_ID || + serial->dev->descriptor.idVendor == PIRELLI_VENDOR_ID) && + serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff) + return -ENODEV; + data = serial->private = kzalloc(sizeof(struct option_intf_private), GFP_KERNEL); if (!data) return -ENOMEM; -- cgit v1.2.3 From 
872f8b42544c58dfa241956d220ada115a8e93c7 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 6 Mar 2010 14:08:56 +0300 Subject: USB: goku_udc: remove potential null dereference "dev" is always null here. In the end it's only used to get the pci_name() of "pdev" which is redundant information and so I removed it. Signed-off-by: Dan Carpenter Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/goku_udc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c index e8edc640381e..1088d08c7ed8 100644 --- a/drivers/usb/gadget/goku_udc.c +++ b/drivers/usb/gadget/goku_udc.c @@ -1768,7 +1768,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id) * usb_gadget_driver_{register,unregister}() must change. */ if (the_controller) { - WARNING(dev, "ignoring %s\n", pci_name(pdev)); + pr_warning("ignoring %s\n", pci_name(pdev)); return -EBUSY; } if (!pdev->irq) { -- cgit v1.2.3 From f2984a333fb5e325d478950c9d8af3693869e69c Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Tue, 9 Mar 2010 00:35:22 -0500 Subject: USB: gadget: fix Blackfin builds after gadget cleansing The recent change to clean out dead gadget drivers (90f7976880bbbf99) missed the call to gadget_is_musbhsfc() behind CONFIG_BLACKFIN. This causes Blackfin gadget builds to fail since the function no longer exists anywhere. Signed-off-by: Mike Frysinger Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/epautoconf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c index 65a5f94cbc04..3568de210f79 100644 --- a/drivers/usb/gadget/epautoconf.c +++ b/drivers/usb/gadget/epautoconf.c @@ -266,7 +266,7 @@ struct usb_ep * __init usb_ep_autoconfig ( } #ifdef CONFIG_BLACKFIN - } else if (gadget_is_musbhsfc(gadget) || gadget_is_musbhdrc(gadget)) { + } else if (gadget_is_musbhdrc(gadget)) { if ((USB_ENDPOINT_XFER_BULK == type) || (USB_ENDPOINT_XFER_ISOC == type)) { if (USB_DIR_IN & desc->bEndpointAddress) -- cgit v1.2.3 From f88f6691b73a35b0c6dcabb9e587aa4c63d09010 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Sun, 7 Mar 2010 10:36:27 -0500 Subject: USB: g_mass_storage: fix section mismatch warnings The recent commit (0e530b45783f75) that moved usb_ep_autoconfig from the __devinit section to the __init section missed the mass storage device. Its fsg_bind() function uses the usb_ep_autoconfig() function from non __init context leading to: WARNING: drivers/usb/gadget/g_mass_storage.o(.text): Section mismatch in reference from the function _fsg_bind() to the function .init.text:_usb_ep_autoconfig() So move fsg_bind() into __init as well. 
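In miniature, the mismatch and its fix look like this (function names invented):

    #include <linux/init.h>

    static int __init helper_autoconfig(void)       /* placed in .init.text */
    {
            return 0;
    }

    /* A plain (non-__init) caller here would reference code discarded after
     * boot and trigger the section mismatch warning; marking the caller
     * __init as well is safe because binding only happens at init time. */
    static int __init demo_bind(void)
    {
            return helper_autoconfig();
    }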
Signed-off-by: Mike Frysinger Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/f_mass_storage.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index 5a3cdd08f1d0..db08de2af182 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c @@ -2910,7 +2910,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f) } -static int fsg_bind(struct usb_configuration *c, struct usb_function *f) +static int __init fsg_bind(struct usb_configuration *c, struct usb_function *f) { struct fsg_dev *fsg = fsg_from_func(f); struct usb_gadget *gadget = c->cdev->gadget; -- cgit v1.2.3 From f479d70b4f7674083c2e3c3e603b15811713fb18 Mon Sep 17 00:00:00 2001 From: Peter Korsgaard Date: Fri, 12 Mar 2010 15:55:28 +0100 Subject: USB: gadget: f_mass_storage::fsg_bind(): fix error handling Contrary to the comment in fsg_add, fsg_bind calls fsg_unbind on errors, which decreases refcount and frees the fsg_dev structure, causing trouble when fsg_add does the same. Fix it by simply leaving up cleanup to fsg_add(). Signed-off-by: Peter Korsgaard Acked-by: Michal Nazarewicz Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/f_mass_storage.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index db08de2af182..f4911c09022e 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c @@ -2954,7 +2954,6 @@ static int __init fsg_bind(struct usb_configuration *c, struct usb_function *f) autoconf_fail: ERROR(fsg, "unable to autoconfigure all endpoints\n"); rc = -ENOTSUPP; - fsg_unbind(c, f); return rc; } -- cgit v1.2.3 From 11b10d999469dc0514447a15e88c7ef14ec0761d Mon Sep 17 00:00:00 2001 From: Michal Nazarewicz Date: Mon, 15 Mar 2010 11:10:23 +0100 Subject: USB: g_mass_storage: fixed module name in Kconfig The Kconfig help message for Mass Storage Gadget claimed the module will be named "g_file_storage" whereas it should be "g_mass_storage". Signed-off-by: Michal Nazarewicz Cc: Kyungmin Park Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 7460cd797f45..11a3e0fa4331 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -747,7 +747,7 @@ config USB_MASS_STORAGE which may be used with composite framework. Say "y" to link the driver statically, or "m" to build - a dynamically linked module called "g_file_storage". If unsure, + a dynamically linked module called "g_mass_storage". If unsure, consider File-backed Storage Gadget. config USB_G_SERIAL -- cgit v1.2.3 From 9c67d28e4e7683b4f667fa4c7b6f9aee92b44b5c Mon Sep 17 00:00:00 2001 From: Alessio Igor Bogani Date: Sat, 13 Mar 2010 18:35:14 +0100 Subject: USB: ftdi_sio: Fix locking for change_speed() function The change_speed() function should be serialized against multiple calls. Use the cfg_lock mutex to do this. 
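A stripped-down sketch of the serialisation (invented names; in the real driver the callers of change_speed() hold priv->cfg_lock across the call):

    #include <linux/mutex.h>

    struct demo_port {
            struct mutex cfg_lock;          /* serialises baud/divisor updates */
    };

    /* hardware access, not shown */
    static int demo_write_divisor(struct demo_port *port, unsigned int baud)
    {
            return 0;
    }

    static int demo_set_baud(struct demo_port *port, unsigned int baud)
    {
            int ret;

            mutex_lock(&port->cfg_lock);
            ret = demo_write_divisor(port, baud);   /* read-modify-write sequence */
            mutex_unlock(&port->cfg_lock);

            return ret;
    }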
Signed-off-by: Alessio Igor Bogani Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/ftdi_sio.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 6fc09dc3b53e..1d7c4fac02e8 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -91,7 +91,7 @@ struct ftdi_private { unsigned long tx_outstanding_bytes; unsigned long tx_outstanding_urbs; unsigned short max_packet_size; - struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() */ + struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() and change_speed() */ }; /* struct ftdi_sio_quirk is used by devices requiring special attention. */ @@ -1273,8 +1273,8 @@ check_and_exit: (priv->flags & ASYNC_SPD_MASK)) || (((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) && (old_priv.custom_divisor != priv->custom_divisor))) { - mutex_unlock(&priv->cfg_lock); change_speed(tty, port); + mutex_unlock(&priv->cfg_lock); } else mutex_unlock(&priv->cfg_lock); @@ -2265,9 +2265,11 @@ static void ftdi_set_termios(struct tty_struct *tty, clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); } else { /* set the baudrate determined before */ + mutex_lock(&priv->cfg_lock); if (change_speed(tty, port)) dev_err(&port->dev, "%s urb failed to set baudrate\n", __func__); + mutex_unlock(&priv->cfg_lock); /* Ensure RTS and DTR are raised when baudrate changed from 0 */ if (!old_termios || (old_termios->c_cflag & CBAUD) == B0) set_mctrl(port, TIOCM_DTR | TIOCM_RTS); -- cgit v1.2.3 From 83ba11d93434e6f0cc2e060336b0b19a3f687fa3 Mon Sep 17 00:00:00 2001 From: Maurus Cuelenaere Date: Mon, 8 Mar 2010 18:20:59 +0100 Subject: USB: gadget: add gadget controller number for s3c-hsotg driver This prevents some drivers from complaining that no bcdDevice id was set. Signed-off-by: Maurus Cuelenaere Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/gadget_chips.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h index 1edbc12fff18..e511fec9f26d 100644 --- a/drivers/usb/gadget/gadget_chips.h +++ b/drivers/usb/gadget/gadget_chips.h @@ -136,6 +136,12 @@ #define gadget_is_r8a66597(g) 0 #endif +#ifdef CONFIG_USB_S3C_HSOTG +#define gadget_is_s3c_hsotg(g) (!strcmp("s3c-hsotg", (g)->name)) +#else +#define gadget_is_s3c_hsotg(g) 0 +#endif + /** * usb_gadget_controller_number - support bcdDevice id convention @@ -192,6 +198,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget) return 0x24; else if (gadget_is_r8a66597(gadget)) return 0x25; + else if (gadget_is_s3c_hsotg(gadget)) + return 0x26; return -ENOENT; } -- cgit v1.2.3 From 7f56cfd253d929c06ce4ed5bfb99a8c6805075c9 Mon Sep 17 00:00:00 2001 From: Christoph Egger Date: Wed, 10 Mar 2010 12:33:11 +0100 Subject: USB: Remove last bit of CONFIG_USB_BERRY_CHARGE One last bit was missed while removing the USB_BERRY_CHARGE config option in a8d4211f33a9573f7b1bdcfd9c9c48631d1515ee which gets dropped by this patch. 
Signed-off-by: Christoph Egger Signed-off-by: Greg Kroah-Hartman --- drivers/usb/storage/unusual_devs.h | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 61c8b9df96e6..ccf1dbbb87ef 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1389,20 +1389,6 @@ UNUSUAL_DEV( 0x0f19, 0x0105, 0x0100, 0x0100, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE ), -/* Jeremy Katz : - * The Blackberry Pearl can run in two modes; a usb-storage only mode - * and a mode that allows access via mass storage and to its database. - * The berry_charge module will set the device to dual mode and thus we - * should ignore its native mode if that module is built - */ -#ifdef CONFIG_USB_BERRY_CHARGE -UNUSUAL_DEV( 0x0fca, 0x0006, 0x0001, 0x0001, - "RIM", - "Blackberry Pearl", - US_SC_DEVICE, US_PR_DEVICE, NULL, - US_FL_IGNORE_DEVICE ), -#endif - /* Reported by Michael Stattmann */ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, "Sony Ericsson", -- cgit v1.2.3 From e549a17f698e266373f6757bd068d1e98397b4c0 Mon Sep 17 00:00:00 2001 From: Michael Brunner Date: Wed, 10 Mar 2010 23:26:37 +0100 Subject: USB: cp210x: Remove double usb_control_msg from cp210x_set_config This patch removes a double usb_control_msg that sets the cp210x configuration registers a second time when calling cp210x_set_config. For data sizes >2 the second write gets corrupted. The patch has been created against 2.6.34-rc1, but all cp210x driver revisions are affected. Signed-off-by: Michael Brunner Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/cp210x.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 507382b0a9ed..ec9b0449ccf6 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -313,11 +313,6 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request, return -EPROTO; } - /* Single data value */ - result = usb_control_msg(serial->dev, - usb_sndctrlpipe(serial->dev, 0), - request, REQTYPE_HOST_TO_DEVICE, data[0], - 0, NULL, 0, 300); return 0; } -- cgit v1.2.3 From f09a15e6e69884cedec4d1c022089a973aa01f1e Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Tue, 16 Mar 2010 12:55:44 -0700 Subject: USB: Fix usb_fill_int_urb for SuperSpeed devices USB 3 and Wireless USB specify a logarithmic encoding of the endpoint interval that matches the USB 2 specification. usb_fill_int_urb() didn't know that and was filling in the interval as if it was USB 1.1. Fix usb_fill_int_urb() for SuperSpeed devices, but leave the wireless case alone, because David Vrabel wants to keep the old encoding. Update the struct urb kernel doc to note that SuperSpeed URBs must have urb->interval specified in microframes. Add a missing break statement in the usb_submit_urb() interrupt URB checking, since wireless USB and SuperSpeed USB encode urb->interval differently. This allows xHCI roothubs to actually register with khubd. 
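The two encodings the fix distinguishes, as a small helper (invented name): per the USB specifications, high-speed and SuperSpeed interrupt endpoints use a logarithmic bInterval (period = 2^(bInterval-1) microframes), while full/low speed use a linear count of frames.

    #include <linux/types.h>

    static unsigned int demo_interval_to_uframes(bool hs_or_ss,
                                                 unsigned int bInterval)
    {
            if (hs_or_ss)
                    return 1U << (bInterval - 1);   /* e.g. 4 -> 8 uframes = 1 ms */
            return bInterval * 8;                   /* frames -> microframes */
    }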
Signed-off-by: Matthew Wilcox Signed-off-by: Sarah Sharp Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/urb.c | 1 + include/linux/usb.h | 18 +++++++++++++----- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 27080561a1c2..45a32dadb406 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -453,6 +453,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) if (urb->interval > (1 << 15)) return -EINVAL; max = 1 << 15; + break; case USB_SPEED_WIRELESS: if (urb->interval > 16) return -EINVAL; diff --git a/include/linux/usb.h b/include/linux/usb.h index 8c9f053111bb..ce1323c4e47c 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1055,7 +1055,8 @@ typedef void (*usb_complete_t)(struct urb *); * @number_of_packets: Lists the number of ISO transfer buffers. * @interval: Specifies the polling interval for interrupt or isochronous * transfers. The units are frames (milliseconds) for full and low - * speed devices, and microframes (1/8 millisecond) for highspeed ones. + * speed devices, and microframes (1/8 millisecond) for highspeed + * and SuperSpeed devices. * @error_count: Returns the number of ISO transfers that reported errors. * @context: For use in completion functions. This normally points to * request-specific driver context. @@ -1286,9 +1287,16 @@ static inline void usb_fill_bulk_urb(struct urb *urb, * * Initializes a interrupt urb with the proper information needed to submit * it to a device. - * Note that high speed interrupt endpoints use a logarithmic encoding of - * the endpoint interval, and express polling intervals in microframes - * (eight per millisecond) rather than in frames (one per millisecond). + * + * Note that High Speed and SuperSpeed interrupt endpoints use a logarithmic + * encoding of the endpoint interval, and express polling intervals in + * microframes (eight per millisecond) rather than in frames (one per + * millisecond). + * + * Wireless USB also uses the logarithmic encoding, but specifies it in units of + * 128us instead of 125us. For Wireless USB devices, the interval is passed + * through to the host controller, rather than being translated into microframe + * units. */ static inline void usb_fill_int_urb(struct urb *urb, struct usb_device *dev, @@ -1305,7 +1313,7 @@ static inline void usb_fill_int_urb(struct urb *urb, urb->transfer_buffer_length = buffer_length; urb->complete = complete_fn; urb->context = context; - if (dev->speed == USB_SPEED_HIGH) + if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) urb->interval = 1 << (interval - 1); else urb->interval = interval; -- cgit v1.2.3 From 9ce669a8924c61b7321d6e2f27ed67bcd46c1fbb Mon Sep 17 00:00:00 2001 From: Sarah Sharp Date: Tue, 16 Mar 2010 12:59:24 -0700 Subject: USB: xhci: Make endpoint interval debugging clearer. The xHCI hardware can only handle polling intervals that are a power of two. When we add a new endpoint during a bandwidth allocation, and the polling interval is rounded down to a power of two, print the original polling interval in the endpoint descriptor. 
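[Illustration, not part of the patch] For context, a userspace sketch of the check this patch makes more informative. The exponent selection below is a simplification of what xhci-mem.c actually does, and fls_u32() stands in for the kernel's fls(): a full-speed bInterval is expressed in frames of 8 microframes each, the hardware only accepts 2^interval microframes, and the warning fires whenever the two disagree.

	#include <stdio.h>

	static unsigned int fls_u32(unsigned int x)	/* 1-based index of highest set bit */
	{
		unsigned int r = 0;

		while (x) {
			r++;
			x >>= 1;
		}
		return r;
	}

	int main(void)
	{
		unsigned int bInterval;

		for (bInterval = 1; bInterval <= 10; bInterval++) {
			unsigned int interval = fls_u32(8 * bInterval) - 1;	/* round down to a power of two */

			if (interval < 3)	/* minimum exponent, as in the quoted hunk */
				interval = 3;
			if ((1u << interval) != 8 * bInterval)
				printf("rounding interval to %u microframes, ep desc says %u microframes\n",
				       1u << interval, 8 * bInterval);
		}
		return 0;
	}

For bInterval=3 this prints "rounding interval to 16 microframes, ep desc says 24 microframes", which is the extra detail the patched dev_warn() now carries.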
Signed-off-by: Sarah Sharp Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-mem.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 49f7d72f8b1b..bba9b19ed1b9 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -566,8 +566,13 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, if (interval < 3) interval = 3; if ((1 << interval) != 8*ep->desc.bInterval) - dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", - ep->desc.bEndpointAddress, 1 << interval); + dev_warn(&udev->dev, + "ep %#x - rounding interval" + " to %d microframes, " + "ep desc says %d microframes\n", + ep->desc.bEndpointAddress, + 1 << interval, + 8*ep->desc.bInterval); } break; default: -- cgit v1.2.3 From d835933436ac0d1e8f5b35fe809fd4e767e55d6e Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Tue, 16 Mar 2010 12:29:35 +0900 Subject: usb: r8a66597-hcd: fix removed from an attached hub fix the problem that when a USB hub is attached to the r8a66597-hcd and a device is removed from that hub, it's likely that a kernel panic follows. Reported-by: Markus Pietrek Signed-off-by: Yoshihiro Shimoda Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/r8a66597-hcd.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index bee558aed427..f71a73a93d0c 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -418,7 +418,7 @@ static u8 alloc_usb_address(struct r8a66597 *r8a66597, struct urb *urb) /* this function must be called with interrupt disabled */ static void free_usb_address(struct r8a66597 *r8a66597, - struct r8a66597_device *dev) + struct r8a66597_device *dev, int reset) { int port; @@ -430,7 +430,13 @@ static void free_usb_address(struct r8a66597 *r8a66597, dev->state = USB_STATE_DEFAULT; r8a66597->address_map &= ~(1 << dev->address); dev->address = 0; - dev_set_drvdata(&dev->udev->dev, NULL); + /* + * Only when resetting USB, it is necessary to erase drvdata. When + * a usb device with usb hub is disconnect, "dev->udev" is already + * freed on usb_desconnect(). So we cannot access the data. 
+ */ + if (reset) + dev_set_drvdata(&dev->udev->dev, NULL); list_del(&dev->device_list); kfree(dev); @@ -1069,7 +1075,7 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port) struct r8a66597_device *dev = r8a66597->root_hub[port].dev; disable_r8a66597_pipe_all(r8a66597, dev); - free_usb_address(r8a66597, dev); + free_usb_address(r8a66597, dev, 0); start_root_hub_sampling(r8a66597, port, 0); } @@ -2085,7 +2091,7 @@ static void update_usb_address_map(struct r8a66597 *r8a66597, spin_lock_irqsave(&r8a66597->lock, flags); dev = get_r8a66597_device(r8a66597, addr); disable_r8a66597_pipe_all(r8a66597, dev); - free_usb_address(r8a66597, dev); + free_usb_address(r8a66597, dev, 0); put_child_connect_map(r8a66597, addr); spin_unlock_irqrestore(&r8a66597->lock, flags); } @@ -2228,7 +2234,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, rh->port |= (1 << USB_PORT_FEAT_RESET); disable_r8a66597_pipe_all(r8a66597, dev); - free_usb_address(r8a66597, dev); + free_usb_address(r8a66597, dev, 1); r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT, get_dvstctr_reg(port)); -- cgit v1.2.3 From 4cb80cda51ff950614701fb30c9d4e583fe5a31f Mon Sep 17 00:00:00 2001 From: Peter Korsgaard Date: Fri, 12 Mar 2010 12:33:15 +0100 Subject: USB: gadget/multi: cdc_do_config: remove redundant check cdc_do_config() had a double ret check after fsg_add(). Signed-off-by: Peter Korsgaard Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/multi.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c index 76496f5d272c..a930d7fd7e7a 100644 --- a/drivers/usb/gadget/multi.c +++ b/drivers/usb/gadget/multi.c @@ -211,8 +211,6 @@ static int __init cdc_do_config(struct usb_configuration *c) ret = fsg_add(c->cdev, c, fsg_common); if (ret < 0) return ret; - if (ret < 0) - return ret; return 0; } -- cgit v1.2.3 From 17cf4442497cb2551eae1dedee638515db47c23e Mon Sep 17 00:00:00 2001 From: Jeff Garzik Date: Fri, 19 Mar 2010 14:25:45 -0400 Subject: Delete zero-length file drivers/mtd/maps/omap_nor.c The content was deleted in cc87edb173effdf74e680ee6d622a935ff0c1d6f, but the file remained as a zero-length file. Signed-off-by: Jeff Garzik --- drivers/mtd/maps/omap_nor.c | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 drivers/mtd/maps/omap_nor.c diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c deleted file mode 100644 index e69de29bb2d1..000000000000 -- cgit v1.2.3 From 220bf991b0366cc50a94feede3d7341fa5710ee4 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 19 Mar 2010 18:17:57 -0700 Subject: Linux 2.6.34-rc2 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 08ff02da7ce3..a5ba759e0fd5 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 34 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Man-Eating Seals of Antiquity # *DOCUMENTATION* -- cgit v1.2.3 From 101545f6fef4a0a3ea8daf0b5b880df2c6a92a69 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Mon, 15 Mar 2010 14:12:58 -0700 Subject: Bluetooth: Fix potential bad memory access with sysfs files When creating a high number of Bluetooth sockets (L2CAP, SCO and RFCOMM) it is possible to scribble repeatedly on arbitrary pages of memory. Ensure that the content of these sysfs files is always less than one page. Even if this means truncating. The files in question are scheduled to be moved over to debugfs in the future anyway. 
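[Illustration, not part of the patch] The bounded-write pattern used throughout this fix is worth spelling out. Below is a minimal standalone sketch of it; PAGE_SIZE here is a local define and the record format is a stand-in, not the Bluetooth code. Each record is written with snprintf(), the space it would have needed is subtracted from what remains, and the loop bails out once the page is exhausted instead of writing past it.

	#include <stdio.h>
	#include <string.h>

	#define PAGE_SIZE 4096

	static size_t dump_records(char *buf, int nrecords)
	{
		char *str = buf;
		int size = PAGE_SIZE;
		int i;

		for (i = 0; i < nrecords; i++) {
			int len = snprintf(str, size, "record %d state %d\n", i, i % 3);

			size -= len;
			if (size <= 0)		/* out of room: truncate, never overflow */
				break;
			str += len;
		}
		return str - buf;		/* bytes actually accounted for */
	}

	int main(void)
	{
		char page[PAGE_SIZE];
		size_t used = dump_records(page, 100000);

		printf("emitted %zu of %d bytes, truncated safely\n", used, PAGE_SIZE);
		return 0;
	}

As in the patch, a record that no longer fits may be partially written by the final snprintf() but is not counted, so the caller only ever sees complete records within one page.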
Based on initial patches from Neil Brown and Linus Torvalds Reported-by: Neil Brown Signed-off-by: Marcel Holtmann --- net/bluetooth/l2cap.c | 10 +++++++++- net/bluetooth/rfcomm/core.c | 13 ++++++++++++- net/bluetooth/rfcomm/sock.c | 11 ++++++++++- net/bluetooth/sco.c | 11 ++++++++++- 4 files changed, 41 insertions(+), 4 deletions(-) diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 4db7ae2fe07d..27551820741e 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -3944,16 +3944,24 @@ static ssize_t l2cap_sysfs_show(struct class *dev, struct sock *sk; struct hlist_node *node; char *str = buf; + int size = PAGE_SIZE; read_lock_bh(&l2cap_sk_list.lock); sk_for_each(sk, node, &l2cap_sk_list.head) { struct l2cap_pinfo *pi = l2cap_pi(sk); + int len; - str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", + len = snprintf(str, size, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), sk->sk_state, __le16_to_cpu(pi->psm), pi->scid, pi->dcid, pi->imtu, pi->omtu, pi->sec_level); + + size -= len; + if (size <= 0) + break; + + str += len; } read_unlock_bh(&l2cap_sk_list.lock); diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index db8a68e1a5ba..cf164073269d 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -2105,6 +2105,7 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, struct rfcomm_session *s; struct list_head *pp, *p; char *str = buf; + int size = PAGE_SIZE; rfcomm_lock(); @@ -2113,11 +2114,21 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, list_for_each(pp, &s->dlcs) { struct sock *sk = s->sock->sk; struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list); + int len; - str += sprintf(str, "%s %s %ld %d %d %d %d\n", + len = snprintf(str, size, "%s %s %ld %d %d %d %d\n", batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits); + + size -= len; + if (size <= 0) + break; + + str += len; } + + if (size <= 0) + break; } rfcomm_unlock(); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index ca87d6ac6a20..8d0ee0b8a6b6 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -1068,13 +1068,22 @@ static ssize_t rfcomm_sock_sysfs_show(struct class *dev, struct sock *sk; struct hlist_node *node; char *str = buf; + int size = PAGE_SIZE; read_lock_bh(&rfcomm_sk_list.lock); sk_for_each(sk, node, &rfcomm_sk_list.head) { - str += sprintf(str, "%s %s %d %d\n", + int len; + + len = snprintf(str, size, "%s %s %d %d\n", batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), sk->sk_state, rfcomm_pi(sk)->channel); + + size -= len; + if (size <= 0) + break; + + str += len; } read_unlock_bh(&rfcomm_sk_list.lock); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index f93b939539bc..967a75175c66 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -960,13 +960,22 @@ static ssize_t sco_sysfs_show(struct class *dev, struct sock *sk; struct hlist_node *node; char *str = buf; + int size = PAGE_SIZE; read_lock_bh(&sco_sk_list.lock); sk_for_each(sk, node, &sco_sk_list.head) { - str += sprintf(str, "%s %s %d\n", + int len; + + len = snprintf(str, size, "%s %s %d\n", batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), sk->sk_state); + + size -= len; + if (size <= 0) + break; + + str += len; } read_unlock_bh(&sco_sk_list.lock); -- cgit v1.2.3 From aef7d97cc604309b66f6f45cce02cd734934cd4e Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Sun, 21 Mar 2010 05:27:45 +0100 
Subject: Bluetooth: Convert debug files to actually use debugfs instead of sysfs Some of the debug files ended up wrongly in sysfs, because at that point of time, debugfs didn't exist. Convert these files to use debugfs and also seq_file. This patch converts all of these files at once and then removes the exported symbol for the Bluetooth sysfs class. Signed-off-by: Marcel Holtmann --- include/net/bluetooth/bluetooth.h | 2 +- net/bluetooth/hci_sysfs.c | 3 +-- net/bluetooth/l2cap.c | 51 ++++++++++++++++++++++---------------- net/bluetooth/rfcomm/core.c | 52 ++++++++++++++++++++++----------------- net/bluetooth/rfcomm/sock.c | 47 ++++++++++++++++++++--------------- net/bluetooth/sco.c | 47 +++++++++++++++++++---------------- 6 files changed, 114 insertions(+), 88 deletions(-) diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 04a6908e38d2..ff77e8f882f1 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -176,6 +176,6 @@ extern void hci_sock_cleanup(void); extern int bt_sysfs_init(void); extern void bt_sysfs_cleanup(void); -extern struct class *bt_class; +extern struct dentry *bt_debugfs; #endif /* __BLUETOOTH_H */ diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index cafb55b0cea5..05fd125f74fe 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -8,8 +8,7 @@ #include #include -struct class *bt_class = NULL; -EXPORT_SYMBOL_GPL(bt_class); +static struct class *bt_class; struct dentry *bt_debugfs = NULL; EXPORT_SYMBOL_GPL(bt_debugfs); diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 27551820741e..43e17f7d7ecd 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -40,6 +40,8 @@ #include #include #include +#include +#include #include #include #include @@ -3937,39 +3939,42 @@ drop: return 0; } -static ssize_t l2cap_sysfs_show(struct class *dev, - struct class_attribute *attr, - char *buf) +static int l2cap_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; struct hlist_node *node; - char *str = buf; - int size = PAGE_SIZE; read_lock_bh(&l2cap_sk_list.lock); sk_for_each(sk, node, &l2cap_sk_list.head) { struct l2cap_pinfo *pi = l2cap_pi(sk); - int len; - - len = snprintf(str, size, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", - batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), - sk->sk_state, __le16_to_cpu(pi->psm), pi->scid, - pi->dcid, pi->imtu, pi->omtu, pi->sec_level); - - size -= len; - if (size <= 0) - break; - str += len; + seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n", + batostr(&bt_sk(sk)->src), + batostr(&bt_sk(sk)->dst), + sk->sk_state, __le16_to_cpu(pi->psm), + pi->scid, pi->dcid, + pi->imtu, pi->omtu, pi->sec_level); } read_unlock_bh(&l2cap_sk_list.lock); - return str - buf; + return 0; } -static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL); +static int l2cap_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, l2cap_debugfs_show, inode->i_private); +} + +static const struct file_operations l2cap_debugfs_fops = { + .open = l2cap_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct dentry *l2cap_debugfs; static const struct proto_ops l2cap_sock_ops = { .family = PF_BLUETOOTH, @@ -4029,8 +4034,12 @@ static int __init l2cap_init(void) goto error; } - if (class_create_file(bt_class, &class_attr_l2cap) < 0) - BT_ERR("Failed to create L2CAP info file"); + if (bt_debugfs) { + l2cap_debugfs = debugfs_create_file("l2cap", 0444, + bt_debugfs, NULL, 
&l2cap_debugfs_fops); + if (!l2cap_debugfs) + BT_ERR("Failed to create L2CAP debug file"); + } BT_INFO("L2CAP ver %s", VERSION); BT_INFO("L2CAP socket layer initialized"); @@ -4044,7 +4053,7 @@ error: static void __exit l2cap_exit(void) { - class_remove_file(bt_class, &class_attr_l2cap); + debugfs_remove(l2cap_debugfs); if (bt_sock_unregister(BTPROTO_L2CAP) < 0) BT_ERR("L2CAP socket unregistration failed"); diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index cf164073269d..13f114e8b0f9 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -33,6 +33,8 @@ #include #include #include +#include +#include #include #include #include @@ -2098,14 +2100,10 @@ static struct hci_cb rfcomm_cb = { .security_cfm = rfcomm_security_cfm }; -static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, - struct class_attribute *attr, - char *buf) +static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x) { struct rfcomm_session *s; struct list_head *pp, *p; - char *str = buf; - int size = PAGE_SIZE; rfcomm_lock(); @@ -2114,29 +2112,33 @@ static ssize_t rfcomm_dlc_sysfs_show(struct class *dev, list_for_each(pp, &s->dlcs) { struct sock *sk = s->sock->sk; struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list); - int len; - len = snprintf(str, size, "%s %s %ld %d %d %d %d\n", - batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), - d->state, d->dlci, d->mtu, d->rx_credits, d->tx_credits); - - size -= len; - if (size <= 0) - break; - - str += len; + seq_printf(f, "%s %s %ld %d %d %d %d\n", + batostr(&bt_sk(sk)->src), + batostr(&bt_sk(sk)->dst), + d->state, d->dlci, d->mtu, + d->rx_credits, d->tx_credits); } - - if (size <= 0) - break; } rfcomm_unlock(); - return (str - buf); + return 0; +} + +static int rfcomm_dlc_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, rfcomm_dlc_debugfs_show, inode->i_private); } -static CLASS_ATTR(rfcomm_dlc, S_IRUGO, rfcomm_dlc_sysfs_show, NULL); +static const struct file_operations rfcomm_dlc_debugfs_fops = { + .open = rfcomm_dlc_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct dentry *rfcomm_dlc_debugfs; /* ---- Initialization ---- */ static int __init rfcomm_init(void) @@ -2153,8 +2155,12 @@ static int __init rfcomm_init(void) goto unregister; } - if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0) - BT_ERR("Failed to create RFCOMM info file"); + if (bt_debugfs) { + rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444, + bt_debugfs, NULL, &rfcomm_dlc_debugfs_fops); + if (!rfcomm_dlc_debugfs) + BT_ERR("Failed to create RFCOMM debug file"); + } err = rfcomm_init_ttys(); if (err < 0) @@ -2182,7 +2188,7 @@ unregister: static void __exit rfcomm_exit(void) { - class_remove_file(bt_class, &class_attr_rfcomm_dlc); + debugfs_remove(rfcomm_dlc_debugfs); hci_unregister_cb(&rfcomm_cb); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 8d0ee0b8a6b6..7f439765403d 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -40,6 +40,8 @@ #include #include #include +#include +#include #include #include @@ -1061,37 +1063,38 @@ done: return result; } -static ssize_t rfcomm_sock_sysfs_show(struct class *dev, - struct class_attribute *attr, - char *buf) +static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; struct hlist_node *node; - char *str = buf; - int size = PAGE_SIZE; read_lock_bh(&rfcomm_sk_list.lock); sk_for_each(sk, node, &rfcomm_sk_list.head) { - int len; - 
- len = snprintf(str, size, "%s %s %d %d\n", - batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), + seq_printf(f, "%s %s %d %d\n", + batostr(&bt_sk(sk)->src), + batostr(&bt_sk(sk)->dst), sk->sk_state, rfcomm_pi(sk)->channel); - - size -= len; - if (size <= 0) - break; - - str += len; } read_unlock_bh(&rfcomm_sk_list.lock); - return (str - buf); + return 0; } -static CLASS_ATTR(rfcomm, S_IRUGO, rfcomm_sock_sysfs_show, NULL); +static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, rfcomm_sock_debugfs_show, inode->i_private); +} + +static const struct file_operations rfcomm_sock_debugfs_fops = { + .open = rfcomm_sock_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct dentry *rfcomm_sock_debugfs; static const struct proto_ops rfcomm_sock_ops = { .family = PF_BLUETOOTH, @@ -1131,8 +1134,12 @@ int __init rfcomm_init_sockets(void) if (err < 0) goto error; - if (class_create_file(bt_class, &class_attr_rfcomm) < 0) - BT_ERR("Failed to create RFCOMM info file"); + if (bt_debugfs) { + rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444, + bt_debugfs, NULL, &rfcomm_sock_debugfs_fops); + if (!rfcomm_sock_debugfs) + BT_ERR("Failed to create RFCOMM debug file"); + } BT_INFO("RFCOMM socket layer initialized"); @@ -1146,7 +1153,7 @@ error: void rfcomm_cleanup_sockets(void) { - class_remove_file(bt_class, &class_attr_rfcomm); + debugfs_remove(rfcomm_sock_debugfs); if (bt_sock_unregister(BTPROTO_RFCOMM) < 0) BT_ERR("RFCOMM socket layer unregistration failed"); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 967a75175c66..e5b16b76b22e 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -38,6 +38,8 @@ #include #include #include +#include +#include #include #include @@ -953,37 +955,36 @@ drop: return 0; } -static ssize_t sco_sysfs_show(struct class *dev, - struct class_attribute *attr, - char *buf) +static int sco_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; struct hlist_node *node; - char *str = buf; - int size = PAGE_SIZE; read_lock_bh(&sco_sk_list.lock); sk_for_each(sk, node, &sco_sk_list.head) { - int len; - - len = snprintf(str, size, "%s %s %d\n", - batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst), - sk->sk_state); - - size -= len; - if (size <= 0) - break; - - str += len; + seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src), + batostr(&bt_sk(sk)->dst), sk->sk_state); } read_unlock_bh(&sco_sk_list.lock); - return (str - buf); + return 0; } -static CLASS_ATTR(sco, S_IRUGO, sco_sysfs_show, NULL); +static int sco_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, sco_debugfs_show, inode->i_private); +} + +static const struct file_operations sco_debugfs_fops = { + .open = sco_debugfs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct dentry *sco_debugfs; static const struct proto_ops sco_sock_ops = { .family = PF_BLUETOOTH, @@ -1041,8 +1042,12 @@ static int __init sco_init(void) goto error; } - if (class_create_file(bt_class, &class_attr_sco) < 0) - BT_ERR("Failed to create SCO info file"); + if (bt_debugfs) { + sco_debugfs = debugfs_create_file("sco", 0444, + bt_debugfs, NULL, &sco_debugfs_fops); + if (!sco_debugfs) + BT_ERR("Failed to create SCO debug file"); + } BT_INFO("SCO (Voice Link) ver %s", VERSION); BT_INFO("SCO socket layer initialized"); @@ -1056,7 +1061,7 @@ error: static void __exit sco_exit(void) { - class_remove_file(bt_class, &class_attr_sco); + 
debugfs_remove(sco_debugfs); if (bt_sock_unregister(BTPROTO_SCO) < 0) BT_ERR("SCO socket unregistration failed"); -- cgit v1.2.3 From c2c77ec83bdad17fb688557b5b3fdc36661dd1c6 Mon Sep 17 00:00:00 2001 From: Andrei Emeltchenko Date: Fri, 19 Mar 2010 10:26:28 +0200 Subject: Bluetooth: Fix kernel crash on L2CAP stress tests Added very simple check that req buffer has enough space to fit configuration parameters. Shall be enough to reject packets with configuration size more than req buffer. Crash trace below [ 6069.659393] Unable to handle kernel paging request at virtual address 02000205 [ 6069.673034] Internal error: Oops: 805 [#1] PREEMPT ... [ 6069.727172] PC is at l2cap_add_conf_opt+0x70/0xf0 [l2cap] [ 6069.732604] LR is at l2cap_recv_frame+0x1350/0x2e78 [l2cap] ... [ 6070.030303] Backtrace: [ 6070.032806] [] (l2cap_add_conf_opt+0x0/0xf0 [l2cap]) from [] (l2cap_recv_frame+0x1350/0x2e78 [l2cap]) [ 6070.043823] r8:dc5d3100 r7:df2a91d6 r6:00000001 r5:df2a8000 r4:00000200 [ 6070.050659] [] (l2cap_recv_frame+0x0/0x2e78 [l2cap]) from [] (l2cap_recv_acldata+0x2bc/0x350 [l2cap]) [ 6070.061798] [] (l2cap_recv_acldata+0x0/0x350 [l2cap]) from [] (hci_rx_task+0x244/0x478 [bluetooth]) [ 6070.072631] r6:dc647700 r5:00000001 r4:df2ab740 [ 6070.077362] [] (hci_rx_task+0x0/0x478 [bluetooth]) from [] (tasklet_action+0x78/0xd8) [ 6070.087005] [] (tasklet_action+0x0/0xd8) from [] Signed-off-by: Andrei Emeltchenko Acked-by: Gustavo F. Padovan Signed-off-by: Marcel Holtmann --- net/bluetooth/l2cap.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 43e17f7d7ecd..7794a2e2adce 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -2832,6 +2832,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr int len = cmd->len - sizeof(*rsp); char req[64]; + if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) { + l2cap_send_disconn_req(conn, sk); + goto done; + } + /* throw out any old stored conf requests */ result = L2CAP_CONF_SUCCESS; len = l2cap_parse_conf_rsp(sk, rsp->data, -- cgit v1.2.3 From 634a4b2038a6eba4c211fb906fa2f6ec9a4bbfc7 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 21 Mar 2010 18:01:05 -0700 Subject: net: suppress lockdep-RCU false positive in FIB trie. Allow fib_find_node() to be called either under rcu_read_lock() protection or with RTNL held. Signed-off-by: Paul E. McKenney Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/fib_trie.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index af5d89792860..01ef8ba9025c 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -961,7 +961,9 @@ fib_find_node(struct trie *t, u32 key) struct node *n; pos = 0; - n = rcu_dereference(t->trie); + n = rcu_dereference_check(t->trie, + rcu_read_lock_held() || + lockdep_rtnl_is_held()); while (n != NULL && NODE_TYPE(n) == T_TNODE) { tn = (struct tnode *) n; -- cgit v1.2.3 From 5e016cbf6cffd4a53b7922e0c91b775399d7fe47 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sun, 21 Mar 2010 20:55:13 -0700 Subject: ipv4: Don't drop redirected route cache entry unless PTMU actually expired TCP sessions over IPv4 can get stuck if routers between endpoints do not fragment packets but implement PMTU instead, and we are using those routers because of an ICMP redirect. Setup is as follows MTU1 MTU2 MTU1 A--------B------C------D with MTU1 > MTU2. A and D are endpoints, B and C are routers. 
B and C implement PMTU and drop packets larger than MTU2 (for example because DF is set on all packets). TCP sessions are initiated between A and D. There is packet loss between A and D, causing frequent TCP retransmits. After the number of retransmits on a TCP session reaches tcp_retries1, tcp calls dst_negative_advice() prior to each retransmit. This results in route cache entries for the peer to be deleted in ipv4_negative_advice() if the Path MTU is set. If the outstanding data on an affected TCP session is larger than MTU2, packets sent from the endpoints will be dropped by B or C, and ICMP NEEDFRAG will be returned. A and D receive NEEDFRAG messages and update PMTU. Before the next retransmit, tcp will again call dst_negative_advice(), causing the route cache entry (with correct PMTU) to be deleted. The retransmitted packet will be larger than MTU2, causing it to be dropped again. This sequence repeats until the TCP session aborts or is terminated. Problem is fixed by removing redirected route cache entries in ipv4_negative_advice() only if the PMTU is expired. Signed-off-by: Guenter Roeck Signed-off-by: David S. Miller --- net/ipv4/route.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 32d396196df8..54fd68c14c87 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1510,7 +1510,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) ip_rt_put(rt); ret = NULL; } else if ((rt->rt_flags & RTCF_REDIRECTED) || - rt->u.dst.expires) { + (rt->u.dst.expires && + time_after_eq(jiffies, rt->u.dst.expires))) { unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, rt->fl.oif, rt_genid(dev_net(dst->dev))); -- cgit v1.2.3 From 9bf35c8dddd56f7f247a27346f74f5adc18071f4 Mon Sep 17 00:00:00 2001 From: Paulius Zaleckas Date: Sun, 21 Mar 2010 21:19:02 -0700 Subject: if_tunnel.h: add missing ams/byteorder.h include When compiling userspace application which includes if_tunnel.h and uses GRE_* defines you will get undefined reference to __cpu_to_be16. Fix this by adding missing #include Cc: stable@kernel.org Signed-off-by: Paulius Zaleckas Signed-off-by: David S. Miller --- include/linux/if_tunnel.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h index 1822d635be6b..16b92d008bed 100644 --- a/include/linux/if_tunnel.h +++ b/include/linux/if_tunnel.h @@ -2,6 +2,7 @@ #define _IF_TUNNEL_H_ #include +#include #ifdef __KERNEL__ #include -- cgit v1.2.3 From 243aad830e8a4cdda261626fbaeddde16b08d04a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20Ter=C3=A4s?= Date: Sat, 20 Mar 2010 02:27:58 +0000 Subject: ip_gre: include route header_len in max_headroom calculation Taking route's header_len into account, and updating gre device needed_headroom will give better hints on upper bound of required headroom. This is useful if the gre traffic is xfrm'ed. Signed-off-by: Timo Teras Acked-by: Herbert Xu Signed-off-by: David S. 
Miller --- net/ipv4/ip_gre.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index f47c9f76754b..f78402d097b3 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -810,11 +810,13 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev tunnel->err_count = 0; } - max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen; + max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len; if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); + if (max_headroom > dev->needed_headroom) + dev->needed_headroom = max_headroom; if (!new_skb) { ip_rt_put(rt); txq->tx_dropped++; -- cgit v1.2.3 From ec64213c4d482ee4d15b34511441eaecdd002adf Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Fri, 19 Mar 2010 17:36:43 +0530 Subject: virtio: console: Generate a kobject CHANGE event on adding 'name' attribute When the host lets us know what 'name' a port is assigned, we create the sysfs 'name' attribute. Generate a 'change' event after this so that udev wakes up and acts on the rules for virtio-ports (currently there's only one rule that creates a symlink from the 'name' to the actual char device). Signed-off-by: Amit Shah Signed-off-by: Michael S. Tsirkin --- drivers/char/virtio_console.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index f404ccfc9c20..67b474b4167c 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -947,11 +947,18 @@ static void handle_control_message(struct ports_device *portdev, */ err = sysfs_create_group(&port->dev->kobj, &port_attribute_group); - if (err) + if (err) { dev_err(port->dev, "Error %d creating sysfs device attributes\n", err); - + } else { + /* + * Generate a udev event so that appropriate + * symlinks can be created based on udev + * rules. + */ + kobject_uevent(&port->dev->kobj, KOBJ_CHANGE); + } break; case VIRTIO_CONSOLE_PORT_REMOVE: /* -- cgit v1.2.3 From 2de16a493cc6153f7fa0b9da12a3862d063e3425 Mon Sep 17 00:00:00 2001 From: Amit Shah Date: Fri, 19 Mar 2010 17:36:44 +0530 Subject: virtio: console: Check if port is valid in resize_console The console port could have been hot-unplugged. Check if it is valid before working on it. Signed-off-by: Amit Shah Signed-off-by: Michael S. Tsirkin --- drivers/char/virtio_console.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 67b474b4167c..44288ce0cb45 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -681,6 +681,10 @@ static void resize_console(struct port *port) struct virtio_device *vdev; struct winsize ws; + /* The port could have been hot-unplugged */ + if (!port) + return; + vdev = port->portdev->vdev; if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) { vdev->config->get(vdev, -- cgit v1.2.3 From 5b89d2f9ace1970324facc68ca9b8fae19ce8096 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 9 Mar 2010 20:38:48 +0100 Subject: edac, mce: Filter out invalid values Print the CPU associated with the error only when the field is valid. 
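[Illustration, not part of the patch] A standalone sketch of the decode being guarded here, as the patch reads it: the low nibble of NBSH is a bitmask of cores associated with the error, fls(mask) - 1 yields the highest core set in that mask, and an empty mask means there is no core worth printing. fls_u8() below stands in for the kernel's fls() and the mask values are examples.

	#include <stdio.h>

	static int fls_u8(unsigned char x)	/* 1-based index of highest set bit, 0 if none */
	{
		int r = 0;

		while (x) {
			r++;
			x >>= 1;
		}
		return r;
	}

	int main(void)
	{
		unsigned char masks[] = { 0x0, 0x1, 0x4, 0xf };
		size_t i;

		for (i = 0; i < sizeof(masks); i++) {
			unsigned char assoc_cpus = masks[i] & 0xf;

			if (assoc_cpus > 0)
				printf("nbsh nibble %#x -> core: %d\n", assoc_cpus, fls_u8(assoc_cpus) - 1);
			else
				printf("nbsh nibble %#x -> no valid core, skip\n", assoc_cpus);
		}
		return 0;
	}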
Cc: # .32.x .33.x Signed-off-by: Borislav Petkov --- drivers/edac/edac_mce_amd.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c index 8fc91a019620..f5b6d9fe4def 100644 --- a/drivers/edac/edac_mce_amd.c +++ b/drivers/edac/edac_mce_amd.c @@ -316,7 +316,12 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors) if (regs->nbsh & K8_NBSH_ERR_CPU_VAL) pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf)); } else { - pr_cont(", core: %d\n", fls((regs->nbsh & 0xf) - 1)); + u8 assoc_cpus = regs->nbsh & 0xf; + + if (assoc_cpus > 0) + pr_cont(", core: %d", fls(assoc_cpus) - 1); + + pr_cont("\n"); } pr_emerg("%s.\n", EXT_ERR_MSG(xec)); -- cgit v1.2.3 From a90110c61073eab95d1986322693c2b9a8a6a5f6 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Sun, 21 Mar 2010 21:51:51 +0100 Subject: x86 / perf: Fix suspend to RAM on HP nx6325 Commit 3f6da3905398826d85731247e7fbcf53400c18bd (perf: Rework and fix the arch CPU-hotplug hooks) broke suspend to RAM on my HP nx6325 (and most likely on other AMD-based boxes too) by allowing amd_pmu_cpu_offline() to be executed for CPUs that are going offline as part of the suspend process. The problem is that cpuhw->amd_nb may be NULL already, so the function should make sure it's not NULL before accessing the object pointed to by it. Signed-off-by: Rafael J. Wysocki Signed-off-by: Linus Torvalds --- arch/x86/kernel/cpu/perf_event_amd.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 573458f1caf2..b87e0b6970cb 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -348,10 +348,12 @@ static void amd_pmu_cpu_offline(int cpu) raw_spin_lock(&amd_nb_lock); - if (--cpuhw->amd_nb->refcnt == 0) - kfree(cpuhw->amd_nb); + if (cpuhw->amd_nb) { + if (--cpuhw->amd_nb->refcnt == 0) + kfree(cpuhw->amd_nb); - cpuhw->amd_nb = NULL; + cpuhw->amd_nb = NULL; + } raw_spin_unlock(&amd_nb_lock); } -- cgit v1.2.3 From 45575f5a426c01ec68cd33d998267ba2f6278fac Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 22 Mar 2010 17:47:59 +1100 Subject: ppc64 sys_ipc breakage in 2.6.34-rc2 I chased down a fail on ppc64 on 2.6.34-rc2 where an application that uses shared memory was getting a SEGV. Commit baed7fc9b580bd3fb8252ff1d9b36eaf1f86b670 ("Add generic sys_ipc wrapper") changed the second argument from an unsigned long to an int. When we call shmget the system call wrappers for sys_ipc will sign extend second (ie the size) which truncates it. It took a while to track down because the call succeeds and strace shows the untruncated size :) The patch below changes second from an int to an unsigned long which fixes shmget on ppc64 (and I assume s390, sparc64 and mips64). Signed-off-by: Anton Blanchard -- I assume the function prototypes for the other IPC methods would cause us to sign or zero extend second where appropriate (avoiding any security issues). Come to think of it, the syscall wrappers for each method should do that for us as well. 
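[Illustration, not part of the patch] To see why the int prototype breaks shmget, here is a tiny userspace demonstration, assuming a typical LP64 toolchain as on ppc64; the helper names are invented. A size above 2GB squeezed through an int parameter comes back sign-extended and no longer matches what the caller asked for, while the unsigned long prototype preserves it.

	#include <stdio.h>

	static unsigned long via_int(int second)		/* the broken prototype */
	{
		return (unsigned long)second;			/* sign-extended on the way back up */
	}

	static unsigned long via_ulong(unsigned long second)	/* the fixed prototype */
	{
		return second;
	}

	int main(void)
	{
		unsigned long shm_size = 0x80001000UL;	/* a shmget size just over 2GB */

		printf("requested     : %#lx\n", shm_size);
		printf("through int   : %#lx\n", via_int((int)shm_size));
		printf("through ulong : %#lx\n", via_ulong(shm_size));
		return 0;
	}

On LP64 the middle line prints 0xffffffff80001000, which is the corrupted size the kernel saw before this change.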
Signed-off-by: Linus Torvalds --- ipc/syscall.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipc/syscall.c b/ipc/syscall.c index 355a3da9ec73..1d6f53f6b562 100644 --- a/ipc/syscall.c +++ b/ipc/syscall.c @@ -13,7 +13,7 @@ #include #include -SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, int, second, +SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second, unsigned long, third, void __user *, ptr, long, fifth) { int version, ret; -- cgit v1.2.3 From 99b437a9257cb6b267bf32adfb7675948dc6d485 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 22 Mar 2010 13:07:14 +0000 Subject: AFS: Potential null dereference It seems clear from the surrounding code that xpermits is allowed to be NULL here. Signed-off-by: Dan Carpenter Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- fs/afs/security.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/afs/security.c b/fs/afs/security.c index 3ef504370034..bb4ed144d0e4 100644 --- a/fs/afs/security.c +++ b/fs/afs/security.c @@ -189,8 +189,9 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key, long acl_order) if (!permits) goto out_unlock; - memcpy(permits->permits, xpermits->permits, - count * sizeof(struct afs_permit)); + if (xpermits) + memcpy(permits->permits, xpermits->permits, + count * sizeof(struct afs_permit)); _debug("key %x access %x", key_serial(key), vnode->status.caller_access); -- cgit v1.2.3 From c3824d21eb653fe7017476724257ccaa8bf3d9e1 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Mon, 22 Mar 2010 13:50:19 +0000 Subject: rxrpc: Check allocation failure. alloc_skb() can return NULL. Signed-off-by: Tetsuo Handa Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- net/rxrpc/ar-accept.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c index 77228f28fa36..2d744f22a9a1 100644 --- a/net/rxrpc/ar-accept.c +++ b/net/rxrpc/ar-accept.c @@ -88,6 +88,11 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, /* get a notification message to send to the server app */ notification = alloc_skb(0, GFP_NOFS); + if (!notification) { + _debug("no memory"); + ret = -ENOMEM; + goto error_nofree; + } rxrpc_new_skb(notification); notification->mark = RXRPC_SKB_MARK_NEW_CALL; @@ -189,6 +194,7 @@ invalid_service: ret = -ECONNREFUSED; error: rxrpc_free_skb(notification); +error_nofree: _leave(" = %d", ret); return ret; } -- cgit v1.2.3 From 7c3456fdb503071787f7f972de1069b9cacd16f0 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Mon, 22 Mar 2010 10:22:55 -0700 Subject: rxrpc: Check allocation failure. alloc_skb() can return NULL. Signed-off-by: Tetsuo Handa Signed-off-by: David Howells Signed-off-by: David S. 
Miller --- net/rxrpc/ar-accept.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c index 77228f28fa36..2d744f22a9a1 100644 --- a/net/rxrpc/ar-accept.c +++ b/net/rxrpc/ar-accept.c @@ -88,6 +88,11 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, /* get a notification message to send to the server app */ notification = alloc_skb(0, GFP_NOFS); + if (!notification) { + _debug("no memory"); + ret = -ENOMEM; + goto error_nofree; + } rxrpc_new_skb(notification); notification->mark = RXRPC_SKB_MARK_NEW_CALL; @@ -189,6 +194,7 @@ invalid_service: ret = -ECONNREFUSED; error: rxrpc_free_skb(notification); +error_nofree: _leave(" = %d", ret); return ret; } -- cgit v1.2.3 From ef1691504c83ba3eb636c0cfd3ed33f7a6d0b4ee Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Mon, 22 Mar 2010 18:25:20 +0100 Subject: netfilter: xt_recent: fix regression in rules using a zero hit_count Commit 8ccb92ad (netfilter: xt_recent: fix false match) fixed supposedly false matches in rules using a zero hit_count. As it turns out there is nothing false about these matches and people are actually using entries with a hit_count of zero to make rules dependant on addresses inserted manually through /proc. Since this slipped past the eyes of three reviewers, instead of reverting the commit in question, this patch explicitly checks for a hit_count of zero to make the intentions more clear. Reported-by: Thomas Jarosch Tested-by: Thomas Jarosch Cc: stable@kernel.org Signed-off-by: Patrick McHardy --- net/netfilter/xt_recent.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 7073dbb8100c..971d172afece 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -267,7 +267,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par) for (i = 0; i < e->nstamps; i++) { if (info->seconds && time_after(time, e->stamps[i])) continue; - if (info->hit_count && ++hits >= info->hit_count) { + if (!info->hit_count || ++hits >= info->hit_count) { ret = !ret; break; } -- cgit v1.2.3 From ae6be51ed01d6c4aaf249a207b4434bc7785853b Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 22 Mar 2010 13:12:33 -0700 Subject: Fix up prototype for sys_ipc breakage Commit 45575f5a426c ("ppc64 sys_ipc breakage in 2.6.34-rc2") fixed the definition of the sys_ipc() helper, but didn't fix the prototype in Reported-and-tested-by: Andreas Schwab Signed-off-by: Linus Torvalds --- include/linux/syscalls.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index f994ae58a002..057929b0a651 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -688,7 +688,7 @@ asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg); asmlinkage long sys_shmget(key_t key, size_t size, int flag); asmlinkage long sys_shmdt(char __user *shmaddr); asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); -asmlinkage long sys_ipc(unsigned int call, int first, int second, +asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, unsigned long third, void __user *ptr, long fifth); asmlinkage long sys_mq_open(const char __user *name, int oflag, mode_t mode, struct mq_attr __user *attr); -- cgit v1.2.3 From 21afc27c9f9ae1f6370c47b323be7f3b75106569 Mon Sep 17 00:00:00 2001 From: Mike Frysinger Date: Sun, 21 Mar 2010 21:06:01 +0000 Subject: can: bfin_can: switch to common Blackfin can 
header The MMR bits are being moved to this header, so include it. Signed-off-by: Mike Frysinger Acked-by: Wolfgang Grandegger Signed-off-by: David S. Miller --- drivers/net/can/bfin_can.c | 97 ++++------------------------------------------ 1 file changed, 7 insertions(+), 90 deletions(-) diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c index 866905fa4119..03489864376d 100644 --- a/drivers/net/can/bfin_can.c +++ b/drivers/net/can/bfin_can.c @@ -22,96 +22,13 @@ #include #include +#include #include #define DRV_NAME "bfin_can" #define BFIN_CAN_TIMEOUT 100 #define TX_ECHO_SKB_MAX 1 -/* - * transmit and receive channels - */ -#define TRANSMIT_CHL 24 -#define RECEIVE_STD_CHL 0 -#define RECEIVE_EXT_CHL 4 -#define RECEIVE_RTR_CHL 8 -#define RECEIVE_EXT_RTR_CHL 12 -#define MAX_CHL_NUMBER 32 - -/* - * bfin can registers layout - */ -struct bfin_can_mask_regs { - u16 aml; - u16 dummy1; - u16 amh; - u16 dummy2; -}; - -struct bfin_can_channel_regs { - u16 data[8]; - u16 dlc; - u16 dummy1; - u16 tsv; - u16 dummy2; - u16 id0; - u16 dummy3; - u16 id1; - u16 dummy4; -}; - -struct bfin_can_regs { - /* - * global control and status registers - */ - u16 mc1; /* offset 0 */ - u16 dummy1; - u16 md1; /* offset 4 */ - u16 rsv1[13]; - u16 mbtif1; /* offset 0x20 */ - u16 dummy2; - u16 mbrif1; /* offset 0x24 */ - u16 dummy3; - u16 mbim1; /* offset 0x28 */ - u16 rsv2[11]; - u16 mc2; /* offset 0x40 */ - u16 dummy4; - u16 md2; /* offset 0x44 */ - u16 dummy5; - u16 trs2; /* offset 0x48 */ - u16 rsv3[11]; - u16 mbtif2; /* offset 0x60 */ - u16 dummy6; - u16 mbrif2; /* offset 0x64 */ - u16 dummy7; - u16 mbim2; /* offset 0x68 */ - u16 rsv4[11]; - u16 clk; /* offset 0x80 */ - u16 dummy8; - u16 timing; /* offset 0x84 */ - u16 rsv5[3]; - u16 status; /* offset 0x8c */ - u16 dummy9; - u16 cec; /* offset 0x90 */ - u16 dummy10; - u16 gis; /* offset 0x94 */ - u16 dummy11; - u16 gim; /* offset 0x98 */ - u16 rsv6[3]; - u16 ctrl; /* offset 0xa0 */ - u16 dummy12; - u16 intr; /* offset 0xa4 */ - u16 rsv7[7]; - u16 esr; /* offset 0xb4 */ - u16 rsv8[37]; - - /* - * channel(mailbox) mask and message registers - */ - struct bfin_can_mask_regs msk[MAX_CHL_NUMBER]; /* offset 0x100 */ - struct bfin_can_channel_regs chl[MAX_CHL_NUMBER]; /* offset 0x200 */ -}; - /* * bfin can private data */ @@ -163,7 +80,7 @@ static int bfin_can_set_bittiming(struct net_device *dev) if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) timing |= SAM; - bfin_write16(®->clk, clk); + bfin_write16(®->clock, clk); bfin_write16(®->timing, timing); dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n", @@ -185,11 +102,11 @@ static void bfin_can_set_reset_mode(struct net_device *dev) bfin_write16(®->gim, 0); /* reset can and enter configuration mode */ - bfin_write16(®->ctrl, SRS | CCR); + bfin_write16(®->control, SRS | CCR); SSYNC(); - bfin_write16(®->ctrl, CCR); + bfin_write16(®->control, CCR); SSYNC(); - while (!(bfin_read16(®->ctrl) & CCA)) { + while (!(bfin_read16(®->control) & CCA)) { udelay(10); if (--timeout == 0) { dev_err(dev->dev.parent, @@ -244,7 +161,7 @@ static void bfin_can_set_normal_mode(struct net_device *dev) /* * leave configuration mode */ - bfin_write16(®->ctrl, bfin_read16(®->ctrl) & ~CCR); + bfin_write16(®->control, bfin_read16(®->control) & ~CCR); while (bfin_read16(®->status) & CCA) { udelay(10); @@ -726,7 +643,7 @@ static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg) if (netif_running(dev)) { /* enter sleep mode */ - bfin_write16(®->ctrl, bfin_read16(®->ctrl) | SMR); + bfin_write16(®->control, 
bfin_read16(®->control) | SMR); SSYNC(); while (!(bfin_read16(®->intr) & SMACK)) { udelay(10); -- cgit v1.2.3 From 5fc05f8764f301138003ff562a31ad3721f1675f Mon Sep 17 00:00:00 2001 From: Amerigo Wang Date: Sun, 21 Mar 2010 22:59:58 +0000 Subject: netpoll: warn when there are spaces in parameters v2: update according to Frans' comments. Currently, if we leave spaces before dst port, netconsole will silently accept it as 0. Warn about this. Also, when spaces appear in other places, make them visible in error messages. Signed-off-by: WANG Cong Cc: David Miller Acked-by: Neil Horman Signed-off-by: David S. Miller --- net/core/netpoll.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/core/netpoll.c b/net/core/netpoll.c index d4ec38fa64e6..6f9206b36dc2 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -614,7 +614,7 @@ void netpoll_print_options(struct netpoll *np) np->name, np->local_port); printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip); - printk(KERN_INFO "%s: interface %s\n", + printk(KERN_INFO "%s: interface '%s'\n", np->name, np->dev_name); printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port); @@ -661,6 +661,9 @@ int netpoll_parse_options(struct netpoll *np, char *opt) if ((delim = strchr(cur, '@')) == NULL) goto parse_failed; *delim = 0; + if (*cur == ' ' || *cur == '\t') + printk(KERN_INFO "%s: warning: whitespace" + "is not allowed\n", np->name); np->remote_port = simple_strtol(cur, NULL, 10); cur = delim; } @@ -708,7 +711,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt) return 0; parse_failed: - printk(KERN_INFO "%s: couldn't parse config at %s!\n", + printk(KERN_INFO "%s: couldn't parse config at '%s'!\n", np->name, cur); return -1; } -- cgit v1.2.3 From 688328c7ec3cd0dc3b16342aeb045d28012cc955 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 17 Mar 2010 22:24:39 +0000 Subject: netxen: The driver doesn't work on NX_P3_B1 so cause probe to fail. I haven't been able to get link up on a NX_P3_B1 since 2.6.31. The driver complains about a firmware hang instead. When I asked I was told rev 0x41 was a preproduction rev. So disable support in the driver so no one is surprised the code doesn't work. Signed-off-by: Eric W. Biederman Signed-off-by: David S. Miller --- drivers/net/netxen/netxen_nic_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 08780ef1c1f8..9a7a0f3c36c4 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -1246,8 +1246,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int pci_func_id = PCI_FUNC(pdev->devfn); uint8_t revision_id; - if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) { - pr_warning("%s: chip revisions between 0x%x-0x%x" + if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { + pr_warning("%s: chip revisions between 0x%x-0x%x " "will not be enabled.\n", module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); return -ENODEV; -- cgit v1.2.3 From 8e039d84b323c4503c4d56863faa47c783660826 Mon Sep 17 00:00:00 2001 From: Ben Blum Date: Tue, 23 Mar 2010 05:24:03 +0000 Subject: cgroups: net_cls as module Allows the net_cls cgroup subsystem to be compiled as a module This patch modifies net/sched/cls_cgroup.c to allow the net_cls subsystem to be optionally compiled as a module instead of builtin. 
The cgroup_subsys struct is moved around a bit to allow the subsys_id to be either declared as a compile-time constant by the cgroup_subsys.h include in cgroup.h, or, if it's a module, initialized within the struct by cgroup_load_subsys. Signed-off-by: Ben Blum Acked-by: Li Zefan Cc: Paul Menage Cc: "David S. Miller" Cc: KAMEZAWA Hiroyuki Cc: Lai Jiangshan Signed-off-by: Andrew Morton Signed-off-by: David S. Miller --- net/sched/Kconfig | 5 ++++- net/sched/cls_cgroup.c | 36 +++++++++++++++++++++++++++--------- 2 files changed, 31 insertions(+), 10 deletions(-) diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 21f9c7678aa3..2f691fb180d1 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -328,13 +328,16 @@ config NET_CLS_FLOW module will be called cls_flow. config NET_CLS_CGROUP - bool "Control Group Classifier" + tristate "Control Group Classifier" select NET_CLS depends on CGROUPS ---help--- Say Y here if you want to classify packets based on the control cgroup of their process. + To compile this code as a module, choose M here: the + module will be called cls_cgroup. + config NET_EMATCH bool "Extended Matches" select NET_CLS diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index e4877ca6727c..7f27d2c15e08 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -24,6 +24,25 @@ struct cgroup_cls_state u32 classid; }; +static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, + struct cgroup *cgrp); +static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp); +static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp); + +struct cgroup_subsys net_cls_subsys = { + .name = "net_cls", + .create = cgrp_create, + .destroy = cgrp_destroy, + .populate = cgrp_populate, +#ifdef CONFIG_NET_CLS_CGROUP + .subsys_id = net_cls_subsys_id, +#else +#define net_cls_subsys_id net_cls_subsys.subsys_id +#endif + .module = THIS_MODULE, +}; + + static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp) { return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id), @@ -79,14 +98,6 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); } -struct cgroup_subsys net_cls_subsys = { - .name = "net_cls", - .create = cgrp_create, - .destroy = cgrp_destroy, - .populate = cgrp_populate, - .subsys_id = net_cls_subsys_id, -}; - struct cls_cgroup_head { u32 handle; @@ -277,12 +288,19 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = { static int __init init_cgroup_cls(void) { - return register_tcf_proto_ops(&cls_cgroup_ops); + int ret = register_tcf_proto_ops(&cls_cgroup_ops); + if (ret) + return ret; + ret = cgroup_load_subsys(&net_cls_subsys); + if (ret) + unregister_tcf_proto_ops(&cls_cgroup_ops); + return ret; } static void __exit exit_cgroup_cls(void) { unregister_tcf_proto_ops(&cls_cgroup_ops); + cgroup_unload_subsys(&net_cls_subsys); } module_init(init_cgroup_cls); -- cgit v1.2.3 From 4881a4f89a95cc5fef6d32953954bcc3443eefd5 Mon Sep 17 00:00:00 2001 From: Jens Rottmann Date: Tue, 23 Mar 2010 04:23:50 +0000 Subject: ksz884x: fix return value of netdev_set_eeprom ksz884x: fix return value of netdev_set_eeprom netdev_set_eeprom() confused ethtool by just returning 1 on error instead of a proper -EINVAL. Signed-off-by: Jens Rottmann Signed-off-by: David S. 
Miller --- drivers/net/ksz884x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c index 0f59099ee72f..6c5327af1bf9 100644 --- a/drivers/net/ksz884x.c +++ b/drivers/net/ksz884x.c @@ -6322,7 +6322,7 @@ static int netdev_set_eeprom(struct net_device *dev, int len; if (eeprom->magic != EEPROM_MAGIC) - return 1; + return -EINVAL; len = (eeprom->offset + eeprom->len + 1) / 2; for (i = eeprom->offset / 2; i < len; i++) -- cgit v1.2.3 From 4327ba435a56ada13eedf3eb332e583c7a0586a9 Mon Sep 17 00:00:00 2001 From: Benjamin Li Date: Tue, 23 Mar 2010 13:13:11 +0000 Subject: bnx2: Fix netpoll crash. The bnx2 driver calls netif_napi_add() for all the NAPI structs during ->probe() time but not all of them will be used if we're not in MSI-X mode. This creates a problem for netpoll since it will poll all the NAPI structs in the dev_list whether or not they are scheduled, resulting in a crash when we access structure fields not initialized for that vector. We fix it by moving the netif_napi_add() call to ->open() after the number of IRQ vectors has been determined. Signed-off-by: Benjamin Li Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/bnx2.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 381887ba677c..417de1cb2d46 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -246,6 +246,8 @@ static const struct flash_spec flash_5709 = { MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl); +static void bnx2_init_napi(struct bnx2 *bp); + static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) { u32 diff; @@ -6197,6 +6199,7 @@ bnx2_open(struct net_device *dev) bnx2_disable_int(bp); bnx2_setup_int_mode(bp, disable_msi); + bnx2_init_napi(bp); bnx2_napi_enable(bp); rc = bnx2_alloc_mem(bp); if (rc) @@ -8207,7 +8210,7 @@ bnx2_init_napi(struct bnx2 *bp) { int i; - for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { + for (i = 0; i < bp->irq_nvecs; i++) { struct bnx2_napi *bnapi = &bp->bnx2_napi[i]; int (*poll)(struct napi_struct *, int); @@ -8276,7 +8279,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->ethtool_ops = &bnx2_ethtool_ops; bp = netdev_priv(dev); - bnx2_init_napi(bp); pci_set_drvdata(pdev, dev); -- cgit v1.2.3 From 1bf1e347ef254ed8a13e7971a30e1bf3983da3d1 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 23 Mar 2010 13:13:12 +0000 Subject: bnx2: Use proper handler during netpoll. Netpoll needs to call the proper handler depending on the IRQ mode and the vector. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/bnx2.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 417de1cb2d46..a257babd1bb4 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -7646,9 +7646,11 @@ poll_bnx2(struct net_device *dev) int i; for (i = 0; i < bp->irq_nvecs; i++) { - disable_irq(bp->irq_tbl[i].vector); - bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]); - enable_irq(bp->irq_tbl[i].vector); + struct bnx2_irq *irq = &bp->irq_tbl[i]; + + disable_irq(irq->vector); + irq->handler(irq->vector, &bp->bnx2_napi[i]); + enable_irq(irq->vector); } } #endif -- cgit v1.2.3 From fa3d9a6d55014b5bce5575aeab1cf711cff748ab Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Tue, 23 Mar 2010 18:34:38 +0000 Subject: igb: count Rx FIFO errors correctly Don't aggregate rx_no_buffer_count into rx_fifo_errors. 
RNBC counts packets that get queued temporarily in the adapter's FIFO. These packets are not dropped and are not errors. The correct counter is rx_missed_errors (MPC). Signed-off-by: Mitch Williams Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/igb/igb_main.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 45a0e4fd5871..70dc03bb9cb2 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -3963,7 +3963,7 @@ void igb_update_stats(struct igb_adapter *adapter) struct net_device_stats *net_stats = igb_get_stats(adapter->netdev); struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - u32 rnbc, reg; + u32 reg, mpc; u16 phy_tmp; int i; u64 bytes, packets; @@ -4021,7 +4021,9 @@ void igb_update_stats(struct igb_adapter *adapter) adapter->stats.symerrs += rd32(E1000_SYMERRS); adapter->stats.sec += rd32(E1000_SEC); - adapter->stats.mpc += rd32(E1000_MPC); + mpc = rd32(E1000_MPC); + adapter->stats.mpc += mpc; + net_stats->rx_fifo_errors += mpc; adapter->stats.scc += rd32(E1000_SCC); adapter->stats.ecol += rd32(E1000_ECOL); adapter->stats.mcc += rd32(E1000_MCC); @@ -4036,9 +4038,7 @@ void igb_update_stats(struct igb_adapter *adapter) adapter->stats.gptc += rd32(E1000_GPTC); adapter->stats.gotc += rd32(E1000_GOTCL); rd32(E1000_GOTCH); /* clear GOTCL */ - rnbc = rd32(E1000_RNBC); - adapter->stats.rnbc += rnbc; - net_stats->rx_fifo_errors += rnbc; + adapter->stats.rnbc += rd32(E1000_RNBC); adapter->stats.ruc += rd32(E1000_RUC); adapter->stats.rfc += rd32(E1000_RFC); adapter->stats.rjc += rd32(E1000_RJC); -- cgit v1.2.3 From d07f3e375f608e52a1f8958fbde105bb27b7629a Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Tue, 23 Mar 2010 18:34:57 +0000 Subject: igb: do not modify tx_queue_len on link speed change Previously the driver tweaked txqueuelen to avoid false Tx hang reports seen at half duplex. This had the effect of overriding user-set values on link change/reset. Testing shows that adjusting only the timeout factor is sufficient to prevent Tx hang reports at half duplex. Based on e1000e patch by Franco Fichtner Signed-off-by: Emil Tantilov Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/igb/igb.h | 1 - drivers/net/igb/igb_main.c | 10 +--------- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index a1775705b24c..3b772b822a5d 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -267,7 +267,6 @@ struct igb_adapter { /* TX */ struct igb_ring *tx_ring[16]; - unsigned long tx_queue_len; u32 tx_timeout_count; /* RX */ diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 70dc03bb9cb2..e72760c2448b 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -1105,9 +1105,6 @@ static void igb_configure(struct igb_adapter *adapter) struct igb_ring *ring = adapter->rx_ring[i]; igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring)); } - - - adapter->tx_queue_len = netdev->tx_queue_len; } /** @@ -1213,7 +1210,6 @@ void igb_down(struct igb_adapter *adapter) del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); - netdev->tx_queue_len = adapter->tx_queue_len; netif_carrier_off(netdev); /* record the stats before reset*/ @@ -3106,17 +3102,13 @@ static void igb_watchdog_task(struct work_struct *work) ((ctrl & E1000_CTRL_RFCE) ? "RX" : ((ctrl & E1000_CTRL_TFCE) ?
"TX" : "None"))); - /* tweak tx_queue_len according to speed/duplex and - * adjust the timeout factor */ - netdev->tx_queue_len = adapter->tx_queue_len; + /* adjust timeout factor according to speed/duplex */ adapter->tx_timeout_factor = 1; switch (adapter->link_speed) { case SPEED_10: - netdev->tx_queue_len = 10; adapter->tx_timeout_factor = 14; break; case SPEED_100: - netdev->tx_queue_len = 100; /* maybe add some timeout factor ? */ break; } -- cgit v1.2.3 From 31b24b955c3ebbb6f3008a6374e61cf7c05a193c Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 23 Mar 2010 18:35:18 +0000 Subject: igb: only use vlan_gro_receive if vlans are registered This change makes it so that vlan_gro_receive is only used if vlans have been registered to the adapter structure. Previously we were just sending all vlan tagged frames in via this function, but this resulted in a null pointer dereference when vlans are not registered. [ This fixes bugzilla entry 15582 -Eric Dumazet] Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/igb/igb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index e72760c2448b..01c65c7447e1 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -5102,7 +5102,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector, { struct igb_adapter *adapter = q_vector->adapter; - if (vlan_tag) + if (vlan_tag && adapter->vlgrp) vlan_gro_receive(&q_vector->napi, adapter->vlgrp, vlan_tag, skb); else -- cgit v1.2.3 From 669d3e0babb40018dd6e78f4093c13a2eac73866 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Tue, 23 Mar 2010 14:41:45 +0000 Subject: vlan: adds vlan_dev_select_queue This is required to correctly select the vlan tx queue for a driver that supports multiple tx queues and implements ndo_select_queue, since currently the selected vlan tx queue is not aligned with the queue selected by the real net_device's ndo_select_queue. Unaligned vlan tx queue selection causes thrashing, with higher vlan tx lock contention even for light fcoe traffic, and a wrong socket tx queue_mapping for ixgbe, which implements ndo_select_queue. -v2 As per Eric Dumazet's comments, mirrored the vlan net_device_ops to have variants with and without vlan_dev_select_queue, selected according to whether the real dev's ndo_select_queue is present for a vlan net_device. This completely skips calling vlan_dev_select_queue for a real net_device that does not support ndo_select_queue. Signed-off-by: Vasu Dev Signed-off-by: Jeff Kirsher Acked-by: Eric Dumazet Signed-off-by: David S.
Miller --- net/8021q/vlan_dev.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 68 insertions(+), 3 deletions(-) diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 9e83272fc5b0..2fd057c81bbf 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -361,6 +361,14 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, return ret; } +static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb) +{ + struct net_device *rdev = vlan_dev_info(dev)->real_dev; + const struct net_device_ops *ops = rdev->netdev_ops; + + return ops->ndo_select_queue(rdev, skb); +} + static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) { /* TODO: gotta make sure the underlying layer can handle it, @@ -688,7 +696,8 @@ static const struct header_ops vlan_header_ops = { .parse = eth_header_parse, }; -static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops; +static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops, + vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq; static int vlan_dev_init(struct net_device *dev) { @@ -722,11 +731,17 @@ static int vlan_dev_init(struct net_device *dev) if (real_dev->features & NETIF_F_HW_VLAN_TX) { dev->header_ops = real_dev->header_ops; dev->hard_header_len = real_dev->hard_header_len; - dev->netdev_ops = &vlan_netdev_accel_ops; + if (real_dev->netdev_ops->ndo_select_queue) + dev->netdev_ops = &vlan_netdev_accel_ops_sq; + else + dev->netdev_ops = &vlan_netdev_accel_ops; } else { dev->header_ops = &vlan_header_ops; dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; - dev->netdev_ops = &vlan_netdev_ops; + if (real_dev->netdev_ops->ndo_select_queue) + dev->netdev_ops = &vlan_netdev_ops_sq; + else + dev->netdev_ops = &vlan_netdev_ops; } if (is_vlan_dev(real_dev)) @@ -865,6 +880,56 @@ static const struct net_device_ops vlan_netdev_accel_ops = { #endif }; +static const struct net_device_ops vlan_netdev_ops_sq = { + .ndo_select_queue = vlan_dev_select_queue, + .ndo_change_mtu = vlan_dev_change_mtu, + .ndo_init = vlan_dev_init, + .ndo_uninit = vlan_dev_uninit, + .ndo_open = vlan_dev_open, + .ndo_stop = vlan_dev_stop, + .ndo_start_xmit = vlan_dev_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = vlan_dev_set_mac_address, + .ndo_set_rx_mode = vlan_dev_set_rx_mode, + .ndo_set_multicast_list = vlan_dev_set_rx_mode, + .ndo_change_rx_flags = vlan_dev_change_rx_flags, + .ndo_do_ioctl = vlan_dev_ioctl, + .ndo_neigh_setup = vlan_dev_neigh_setup, + .ndo_get_stats = vlan_dev_get_stats, +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) + .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, + .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, + .ndo_fcoe_enable = vlan_dev_fcoe_enable, + .ndo_fcoe_disable = vlan_dev_fcoe_disable, + .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, +#endif +}; + +static const struct net_device_ops vlan_netdev_accel_ops_sq = { + .ndo_select_queue = vlan_dev_select_queue, + .ndo_change_mtu = vlan_dev_change_mtu, + .ndo_init = vlan_dev_init, + .ndo_uninit = vlan_dev_uninit, + .ndo_open = vlan_dev_open, + .ndo_stop = vlan_dev_stop, + .ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = vlan_dev_set_mac_address, + .ndo_set_rx_mode = vlan_dev_set_rx_mode, + .ndo_set_multicast_list = vlan_dev_set_rx_mode, + .ndo_change_rx_flags = vlan_dev_change_rx_flags, + .ndo_do_ioctl = vlan_dev_ioctl, + .ndo_neigh_setup = vlan_dev_neigh_setup, + .ndo_get_stats = 
vlan_dev_get_stats, +#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) + .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, + .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, + .ndo_fcoe_enable = vlan_dev_fcoe_enable, + .ndo_fcoe_disable = vlan_dev_fcoe_disable, + .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, +#endif +}; + void vlan_setup(struct net_device *dev) { ether_setup(dev); -- cgit v1.2.3 From f6b9f4b263f3178fc0f23f0e67d04386528cc727 Mon Sep 17 00:00:00 2001 From: Vasu Dev Date: Tue, 23 Mar 2010 14:42:05 +0000 Subject: vlan: updates vlan real_num_tx_queues Updates real_num_tx_queues in case the underlying real device has changed its real_num_tx_queues. -v2 As per Eric Dumazet's comment: -- adds a BUG_ON to catch the case of real_num_tx_queues exceeding num_tx_queues. -- created this self-contained patch to just update real_num_tx_queues. Signed-off-by: Vasu Dev Signed-off-by: Jeff Kirsher Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/8021q/vlan.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 453512266ea1..db783d7af5a3 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -378,6 +378,8 @@ static void vlan_transfer_features(struct net_device *dev, #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid; #endif + vlandev->real_num_tx_queues = dev->real_num_tx_queues; + BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues); if (old_features != vlandev->features) netdev_features_change(vlandev); -- cgit v1.2.3 From 03e6d819c2cb2cc8ce5642669a0a7c72336ee7a2 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 23 Mar 2010 20:40:50 +0000 Subject: skbuff: remove unused dma_head & dma_maps fields The dma map fields in the skb_shared_info structure no longer have any users and can be dropped, since they make skb_shared_info unnecessarily large. Running slabtop shows that we were using 4K slabs for the skb->head on x86_64 with an allocation size of 1522. It turns out that the dma_head and dma_maps arrays made skb_shared_info large enough that we had crossed over the 2k boundary with standard frames, and as such we were using 4k blocks of memory for all skbs. Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/skbuff.h | 6 ------ 1 file changed, 6 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 03f816a9b659..124f90cd5a38 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -190,9 +190,6 @@ struct skb_shared_info { atomic_t dataref; unsigned short nr_frags; unsigned short gso_size; -#ifdef CONFIG_HAS_DMA - dma_addr_t dma_head; -#endif /* Warning: this field is not always filled in (UFO)! */ unsigned short gso_segs; unsigned short gso_type; @@ -201,9 +198,6 @@ struct skb_shared_info { struct sk_buff *frag_list; struct skb_shared_hwtstamps hwtstamps; skb_frag_t frags[MAX_SKB_FRAGS]; -#ifdef CONFIG_HAS_DMA - dma_addr_t dma_maps[MAX_SKB_FRAGS]; -#endif /* Intermediate layers must ensure that destructor_arg * remains valid until skb destructor */ void * destructor_arg; -- cgit v1.2.3 From 9a127aad4d60968fba96622008ea0d243688f2b0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 24 Mar 2010 01:47:00 +0000 Subject: af_key: return error if pfkey_xfrm_policy2msg_prep() fails The original code saved the error value but just returned 0 in the end. Signed-off-by: Dan Carpenter Acked-by: Jamal Hadi Salim Signed-off-by: David S.
Miller --- net/key/af_key.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/net/key/af_key.c b/net/key/af_key.c index 368707882647..344145f23c34 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -2129,10 +2129,9 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c int err; out_skb = pfkey_xfrm_policy2msg_prep(xp); - if (IS_ERR(out_skb)) { - err = PTR_ERR(out_skb); - goto out; - } + if (IS_ERR(out_skb)) + return PTR_ERR(out_skb); + err = pfkey_xfrm_policy2msg(out_skb, xp, dir); if (err < 0) return err; @@ -2148,7 +2147,6 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c out_hdr->sadb_msg_seq = c->seq; out_hdr->sadb_msg_pid = c->pid; pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); -out: return 0; } -- cgit v1.2.3 From 55e0d7cf279177dfe320f54816320558bc370f24 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 25 Mar 2010 11:00:22 +0100 Subject: netfilter: xt_hashlimit: dl_seq_stop() fix If the dl_seq_start() memory allocation fails, we crash later in dl_seq_stop(), trying to kfree(ERR_PTR(-ENOMEM)). Signed-off-by: Eric Dumazet Signed-off-by: Patrick McHardy --- net/netfilter/xt_hashlimit.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 9e9c48963942..70d561a2d9e0 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -879,7 +879,8 @@ static void dl_seq_stop(struct seq_file *s, void *v) struct xt_hashlimit_htable *htable = s->private; unsigned int *bucket = (unsigned int *)v; - kfree(bucket); + if (!IS_ERR(bucket)) + kfree(bucket); spin_unlock_bh(&htable->lock); } -- cgit v1.2.3 From 9c13886665c43600bd0af4b38e33c654e648e078 Mon Sep 17 00:00:00 2001 From: Jozsef Kadlecsik Date: Thu, 25 Mar 2010 11:17:26 +0100 Subject: netfilter: ip6table_raw: fix table priority The order of the IPv6 raw table is currently reversed, which makes it impossible to use the NOTRACK target in IPv6: for example, if someone enters ip6tables -t raw -A PREROUTING -p tcp --dport 80 -j NOTRACK and we receive fragmented packets, then the first fragment will be untracked and thus skip nf_ct_frag6_gather (and conntrack), while all subsequent fragments enter nf_ct_frag6_gather and reassembly will never successfully be finished. Signed-off-by: Jozsef Kadlecsik Signed-off-by: Patrick McHardy --- include/linux/netfilter_ipv6.h | 1 + net/ipv6/netfilter/ip6table_raw.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index d654873aa25a..1f7e300094cd 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -59,6 +59,7 @@ enum nf_ip6_hook_priorities { NF_IP6_PRI_FIRST = INT_MIN, NF_IP6_PRI_CONNTRACK_DEFRAG = -400, + NF_IP6_PRI_RAW = -300, NF_IP6_PRI_SELINUX_FIRST = -225, NF_IP6_PRI_CONNTRACK = -200, NF_IP6_PRI_MANGLE = -150, diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c index aef31a29de9e..b9cf7cd61923 100644 --- a/net/ipv6/netfilter/ip6table_raw.c +++ b/net/ipv6/netfilter/ip6table_raw.c @@ -13,7 +13,7 @@ static const struct xt_table packet_raw = { .valid_hooks = RAW_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV6, - .priority = NF_IP6_PRI_FIRST, + .priority = NF_IP6_PRI_RAW, }; /* The work comes in here from netfilter.c.
*/ -- cgit v1.2.3 From 8f5992291457c8e6de2f5fe39849de6756be1a96 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 25 Mar 2010 17:25:11 +0100 Subject: netfilter: xt_hashlimit: IPV6 bugfix A missing break statement in hashlimit_ipv6_mask(), and masks between /64 and /95 are not working at all... Signed-off-by: Eric Dumazet Signed-off-by: Patrick McHardy --- net/netfilter/xt_hashlimit.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 70d561a2d9e0..215a64835de8 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -493,6 +493,7 @@ static void hashlimit_ipv6_mask(__be32 *i, unsigned int p) case 64 ... 95: i[2] = maskl(i[2], p - 64); i[3] = 0; + break; case 96 ... 127: i[3] = maskl(i[3], p - 96); break; -- cgit v1.2.3 From bcbe53682f65330bdd9ad7eed9575d2ff536353a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 22 Mar 2010 19:59:47 -0700 Subject: via-velocity: Fix FLOW_CNTL_TX_RX handling in set_mii_flow_control() Clear, don't set, ANAR_ASMDIR in this case. Noticed by Roel Kluin. Signed-off-by: David S. Miller --- drivers/net/via-velocity.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 3a486f3bad3d..bc278d4ee89d 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -812,7 +812,7 @@ static void set_mii_flow_control(struct velocity_info *vptr) case FLOW_CNTL_TX_RX: MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); - MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); break; case FLOW_CNTL_DISABLE: -- cgit v1.2.3 From 93b39a0dba6a15c35a582b9e8b171b8a6ec971aa Mon Sep 17 00:00:00 2001 From: Henne Date: Thu, 25 Mar 2010 12:05:29 +0000 Subject: isdn: Cleanup Sections in PCMCIA driver sedlbauer Compiling this driver gave a section mismatch, so I reviewed the init/exit paths of the driver and made the correct changes. WARNING: drivers/isdn/hisax/built-in.o(.text+0x558d6): Section mismatch in reference from the function sedlbauer_config() to the function .devinit.text:hisax_init_pcmcia() The function sedlbauer_config() references the function __devinit hisax_init_pcmcia(). This is often because sedlbauer_config lacks a __devinit annotation or the annotation of hisax_init_pcmcia is wrong. Signed-off-by: Henrik Kretzschmar Acked-by: Karsten Keil Signed-off-by: David S. Miller --- drivers/isdn/hisax/sedlbauer_cs.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c index 7836ec3c7f86..71b3ddef03bb 100644 --- a/drivers/isdn/hisax/sedlbauer_cs.c +++ b/drivers/isdn/hisax/sedlbauer_cs.c @@ -76,7 +76,7 @@ module_param(protocol, int, 0); event handler. */ -static int sedlbauer_config(struct pcmcia_device *link); +static int sedlbauer_config(struct pcmcia_device *link) __devinit ; static void sedlbauer_release(struct pcmcia_device *link); /* @@ -85,7 +85,7 @@ static void sedlbauer_release(struct pcmcia_device *link); needed to manage one actual PCMCIA card. 
*/ -static void sedlbauer_detach(struct pcmcia_device *p_dev); +static void sedlbauer_detach(struct pcmcia_device *p_dev) __devexit; /* You'll also need to prototype all the functions that will actually @@ -129,7 +129,7 @@ typedef struct local_info_t { ======================================================================*/ -static int sedlbauer_probe(struct pcmcia_device *link) +static int __devinit sedlbauer_probe(struct pcmcia_device *link) { local_info_t *local; @@ -177,7 +177,7 @@ static int sedlbauer_probe(struct pcmcia_device *link) ======================================================================*/ -static void sedlbauer_detach(struct pcmcia_device *link) +static void __devexit sedlbauer_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "sedlbauer_detach(0x%p)\n", link); @@ -283,7 +283,7 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev, -static int sedlbauer_config(struct pcmcia_device *link) +static int __devinit sedlbauer_config(struct pcmcia_device *link) { local_info_t *dev = link->priv; win_req_t *req; @@ -441,7 +441,7 @@ static struct pcmcia_driver sedlbauer_driver = { .name = "sedlbauer_cs", }, .probe = sedlbauer_probe, - .remove = sedlbauer_detach, + .remove = __devexit_p(sedlbauer_detach), .id_table = sedlbauer_ids, .suspend = sedlbauer_suspend, .resume = sedlbauer_resume, -- cgit v1.2.3 From 158e33d1c6d0c6bacf577bcb47591aa4293dfcb1 Mon Sep 17 00:00:00 2001 From: Henne Date: Thu, 25 Mar 2010 12:05:30 +0000 Subject: isdn: Cleanup Sections in PCMCIA driver teles Compiling this driver gave a section mismatch, so I reviewed the init/exit paths of the driver and made the correct changes. WARNING: drivers/isdn/hisax/built-in.o(.text+0x56bfb): Section mismatch in reference from the function teles_cs_config() to the function .devinit.text:hisax_init_pcmcia() The function teles_cs_config() references the function __devinit hisax_init_pcmcia(). This is often because teles_cs_config lacks a __devinit annotation or the annotation of hisax_init_pcmcia is wrong. Signed-off-by: Henrik Kretzschmar Acked-by: Karsten Keil Signed-off-by: David S. Miller --- drivers/isdn/hisax/teles_cs.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c index b0c5976cbdb3..d010a0da8e19 100644 --- a/drivers/isdn/hisax/teles_cs.c +++ b/drivers/isdn/hisax/teles_cs.c @@ -57,7 +57,7 @@ module_param(protocol, int, 0); handler. */ -static int teles_cs_config(struct pcmcia_device *link); +static int teles_cs_config(struct pcmcia_device *link) __devinit ; static void teles_cs_release(struct pcmcia_device *link); /* @@ -66,7 +66,7 @@ static void teles_cs_release(struct pcmcia_device *link); needed to manage one actual PCMCIA card. */ -static void teles_detach(struct pcmcia_device *p_dev); +static void teles_detach(struct pcmcia_device *p_dev) __devexit ; /* A linked list of "instances" of the teles_cs device. 
Each actual @@ -112,7 +112,7 @@ typedef struct local_info_t { ======================================================================*/ -static int teles_probe(struct pcmcia_device *link) +static int __devinit teles_probe(struct pcmcia_device *link) { local_info_t *local; @@ -156,7 +156,7 @@ static int teles_probe(struct pcmcia_device *link) ======================================================================*/ -static void teles_detach(struct pcmcia_device *link) +static void __devexit teles_detach(struct pcmcia_device *link) { local_info_t *info = link->priv; @@ -200,7 +200,7 @@ static int teles_cs_configcheck(struct pcmcia_device *p_dev, return -ENODEV; } -static int teles_cs_config(struct pcmcia_device *link) +static int __devinit teles_cs_config(struct pcmcia_device *link) { local_info_t *dev; int i; @@ -319,7 +319,7 @@ static struct pcmcia_driver teles_cs_driver = { .name = "teles_cs", }, .probe = teles_probe, - .remove = teles_detach, + .remove = __devexit_p(teles_detach), .id_table = teles_ids, .suspend = teles_suspend, .resume = teles_resume, -- cgit v1.2.3 From a465870a808bccba63bf6da30a0b56a2a7abfa5c Mon Sep 17 00:00:00 2001 From: Henne Date: Thu, 25 Mar 2010 12:05:31 +0000 Subject: isdn: Cleanup Sections in PCMCIA driver avma1 Compiling this driver gave a section mismatch, so I reviewed the init/exit paths of the driver and made the correct changes. WARNING: drivers/isdn/hisax/built-in.o(.text+0x56512): Section mismatch in reference from the function avma1cs_config() to the function .devinit.text:hisax_init_pcmcia() The function avma1cs_config() references the function __devinit hisax_init_pcmcia(). This is often because avma1cs_config lacks a __devinit annotation or the annotation of hisax_init_pcmcia is wrong. Signed-off-by: Henrik Kretzschmar Acked-by: Karsten Keil Signed-off-by: David S. Miller --- drivers/isdn/hisax/avma1_cs.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c index e5deb15cf40c..8d1d63a02b34 100644 --- a/drivers/isdn/hisax/avma1_cs.c +++ b/drivers/isdn/hisax/avma1_cs.c @@ -50,7 +50,7 @@ module_param(isdnprot, int, 0); handler. */ -static int avma1cs_config(struct pcmcia_device *link); +static int avma1cs_config(struct pcmcia_device *link) __devinit ; static void avma1cs_release(struct pcmcia_device *link); /* @@ -59,7 +59,7 @@ static void avma1cs_release(struct pcmcia_device *link); needed to manage one actual PCMCIA card. 
*/ -static void avma1cs_detach(struct pcmcia_device *p_dev); +static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit ; /* @@ -99,7 +99,7 @@ typedef struct local_info_t { ======================================================================*/ -static int avma1cs_probe(struct pcmcia_device *p_dev) +static int __devinit avma1cs_probe(struct pcmcia_device *p_dev) { local_info_t *local; @@ -140,7 +140,7 @@ static int avma1cs_probe(struct pcmcia_device *p_dev) ======================================================================*/ -static void avma1cs_detach(struct pcmcia_device *link) +static void __devexit avma1cs_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link); avma1cs_release(link); @@ -174,7 +174,7 @@ static int avma1cs_configcheck(struct pcmcia_device *p_dev, } -static int avma1cs_config(struct pcmcia_device *link) +static int __devinit avma1cs_config(struct pcmcia_device *link) { local_info_t *dev; int i; @@ -282,7 +282,7 @@ static struct pcmcia_driver avma1cs_driver = { .name = "avma1_cs", }, .probe = avma1cs_probe, - .remove = avma1cs_detach, + .remove = __devexit_p(avma1cs_detach), .id_table = avma1cs_ids, }; -- cgit v1.2.3 From f61bb62e3ed7634fe5b7dfd8c9a52e6b799f4023 Mon Sep 17 00:00:00 2001 From: Henne Date: Thu, 25 Mar 2010 12:05:32 +0000 Subject: isdn: Cleanup Sections in PCMCIA driver elsa Compiling this driver gave a section mismatch, so I reviewed the init/exit paths of the driver and made the correct changes. WARNING: drivers/isdn/hisax/built-in.o(.text+0x55e37): Section mismatch in reference from the function elsa_cs_config() to the function .devinit.text:hisax_init_pcmcia() The function elsa_cs_config() references the function __devinit hisax_init_pcmcia(). This is often because elsa_cs_config lacks a __devinit annotation or the annotation of hisax_init_pcmcia is wrong. Signed-off-by: Henrik Kretzschmar Acked-by: Karsten Keil Signed-off-by: David S. Miller --- drivers/isdn/hisax/elsa_cs.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c index c9a30b1c9237..c9f2279e21f5 100644 --- a/drivers/isdn/hisax/elsa_cs.c +++ b/drivers/isdn/hisax/elsa_cs.c @@ -76,7 +76,7 @@ module_param(protocol, int, 0); handler. */ -static int elsa_cs_config(struct pcmcia_device *link); +static int elsa_cs_config(struct pcmcia_device *link) __devinit ; static void elsa_cs_release(struct pcmcia_device *link); /* @@ -85,7 +85,7 @@ static void elsa_cs_release(struct pcmcia_device *link); needed to manage one actual PCMCIA card. 
*/ -static void elsa_cs_detach(struct pcmcia_device *p_dev); +static void elsa_cs_detach(struct pcmcia_device *p_dev) __devexit; /* A driver needs to provide a dev_node_t structure for each device @@ -121,7 +121,7 @@ typedef struct local_info_t { ======================================================================*/ -static int elsa_cs_probe(struct pcmcia_device *link) +static int __devinit elsa_cs_probe(struct pcmcia_device *link) { local_info_t *local; @@ -166,7 +166,7 @@ static int elsa_cs_probe(struct pcmcia_device *link) ======================================================================*/ -static void elsa_cs_detach(struct pcmcia_device *link) +static void __devexit elsa_cs_detach(struct pcmcia_device *link) { local_info_t *info = link->priv; @@ -210,7 +210,7 @@ static int elsa_cs_configcheck(struct pcmcia_device *p_dev, return -ENODEV; } -static int elsa_cs_config(struct pcmcia_device *link) +static int __devinit elsa_cs_config(struct pcmcia_device *link) { local_info_t *dev; int i; @@ -327,7 +327,7 @@ static struct pcmcia_driver elsa_cs_driver = { .name = "elsa_cs", }, .probe = elsa_cs_probe, - .remove = elsa_cs_detach, + .remove = __devexit_p(elsa_cs_detach), .id_table = elsa_ids, .suspend = elsa_suspend, .resume = elsa_resume, -- cgit v1.2.3 From 7c9f757319ccf7a47ce167b86eda671c87b5b917 Mon Sep 17 00:00:00 2001 From: Jon Maloy Date: Thu, 25 Mar 2010 12:40:09 -0700 Subject: TIPC: Removed inactive maintainer Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- MAINTAINERS | 1 - 1 file changed, 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 449d44402083..5c99bd6cc2c1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5423,7 +5423,6 @@ S: Maintained F: sound/soc/codecs/twl4030* TIPC NETWORK LAYER -M: Per Liden M: Jon Maloy M: Allan Stephens L: tipc-discussion@lists.sourceforge.net -- cgit v1.2.3 From 3da0ae6298fa10d4f5e2855ae400b2470bc6693d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 25 Mar 2010 20:32:39 -0700 Subject: isdn: Add netdev to lists in MAINTAINERS entry. Signed-off-by: David S. Miller --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 5c99bd6cc2c1..9ff6341e43ba 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3083,6 +3083,7 @@ F: include/scsi/*iscsi* ISDN SUBSYSTEM M: Karsten Keil L: isdn4linux@listserv.isdn4linux.de (subscribers-only) +L: netdev@vger.kernel.org W: http://www.isdn4linux.de T: git git://git.kernel.org/pub/scm/linux/kernel/git/kkeil/isdn-2.6.git S: Maintained -- cgit v1.2.3 From 4300e8c7f64d95a80ffa7d98d98738f41546bc30 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 26 Mar 2010 10:23:30 -0700 Subject: Revert "r8169: enable 64-bit DMA by default for PCI Express devices (v2)" This reverts commit 353176888386d9025062a12dcec08d49af10cf2c. People are reporting problems due to this change and there is no anticipation that the cause will be tracked down any time soon. We can try next time to selectively re-enable this based upon chip type, or have a black list of some sort. Signed-off-by: David S. 
Miller --- drivers/net/r8169.c | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 9d3ebf3e975e..b93fd23b9f0d 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -187,7 +187,7 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = { MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); static int rx_copybreak = 200; -static int use_dac = -1; +static int use_dac; static struct { u32 msg_enable; } debug = { -1 }; @@ -511,8 +511,7 @@ MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); module_param(rx_copybreak, int, 0); MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); module_param(use_dac, int, 0); -MODULE_PARM_DESC(use_dac, "Enable PCI DAC. -1 defaults on for PCI Express only." -" Unsafe on 32 bit PCI slot."); +MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); module_param_named(debug, debug.msg_enable, int, 0); MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); MODULE_LICENSE("GPL"); @@ -2974,7 +2973,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) void __iomem *ioaddr; unsigned int i; int rc; - int this_use_dac = use_dac; if (netif_msg_drv(&debug)) { printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", @@ -3040,17 +3038,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->cp_cmd = PCIMulRW | RxChkSum; - tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); - if (!tp->pcie_cap) - netif_info(tp, probe, dev, "no PCI Express capability\n"); - - if (this_use_dac < 0) - this_use_dac = tp->pcie_cap != 0; - if ((sizeof(dma_addr_t) > 4) && - this_use_dac && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - netif_info(tp, probe, dev, "using 64-bit DMA\n"); + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { tp->cp_cmd |= PCIDAC; dev->features |= NETIF_F_HIGHDMA; } else { @@ -3069,6 +3058,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_free_res_4; } + tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (!tp->pcie_cap) + netif_info(tp, probe, dev, "no PCI Express capability\n"); + RTL_W16(IntrMask, 0x0000); /* Soft reset the chip. */ -- cgit v1.2.3 From bb2792e0383793d5135ba777e93f0a918371394b Mon Sep 17 00:00:00 2001 From: Amit Kumar Salecha Date: Fri, 26 Mar 2010 00:30:07 +0000 Subject: netxen: fix bios version calculation Bios sub version from unified fw image is calculated incorrect. Signed-off-by: Amit Kumar Salecha Signed-off-by: David S. 
Miller --- drivers/net/netxen/netxen_nic_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 1c63610ead42..7eb925a9f36e 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -761,7 +761,7 @@ nx_get_bios_version(struct netxen_adapter *adapter) if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) { bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) + NX_UNI_BIOS_VERSION_OFF)); - return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) + + return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24); } else return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]); -- cgit v1.2.3 From 77c553900c58c3e4f475e233ad4ff6aeb282deb4 Mon Sep 17 00:00:00 2001 From: Amit Kumar Salecha Date: Fri, 26 Mar 2010 00:30:08 +0000 Subject: netxen: fix warning in ioaddr for NX3031 chip Signed-off-by: Amit Kumar Salecha crb_intr_mask/crb_sts_consumer is predefined for NX2031 not for NX3031. For NX3031, these values get defined in rx context creation. Signed-off-by: David S. Miller --- drivers/net/netxen/netxen_nic_ctx.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c index 2a8ef5fc9663..f26e54716c88 100644 --- a/drivers/net/netxen/netxen_nic_ctx.c +++ b/drivers/net/netxen/netxen_nic_ctx.c @@ -669,13 +669,15 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter) } sds_ring->desc_head = (struct status_desc *)addr; - sds_ring->crb_sts_consumer = - netxen_get_ioaddr(adapter, - recv_crb_registers[port].crb_sts_consumer[ring]); + if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + sds_ring->crb_sts_consumer = + netxen_get_ioaddr(adapter, + recv_crb_registers[port].crb_sts_consumer[ring]); - sds_ring->crb_intr_mask = - netxen_get_ioaddr(adapter, - recv_crb_registers[port].sw_int_mask[ring]); + sds_ring->crb_intr_mask = + netxen_get_ioaddr(adapter, + recv_crb_registers[port].sw_int_mask[ring]); + } } -- cgit v1.2.3 From afbe5cd6c40e0f20fa8832d17fa44ae605591ce1 Mon Sep 17 00:00:00 2001 From: Amit Kumar Salecha Date: Fri, 26 Mar 2010 00:30:09 +0000 Subject: netxen: added sanity check for pci map Signed-off-by: Amit Kumar Salecha Return value of ioremap is not checked, NULL check added. Signed-off-by: Amit Kumar Salecha Signed-off-by: David S. 
Miller --- drivers/net/netxen/netxen_nic_main.c | 45 +++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 18 deletions(-) diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 9a7a0f3c36c4..01808b28d1b6 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -604,16 +604,14 @@ netxen_cleanup_pci_map(struct netxen_adapter *adapter) static int netxen_setup_pci_map(struct netxen_adapter *adapter) { - void __iomem *mem_ptr0 = NULL; - void __iomem *mem_ptr1 = NULL; - void __iomem *mem_ptr2 = NULL; void __iomem *db_ptr = NULL; resource_size_t mem_base, db_base; - unsigned long mem_len, db_len = 0, pci_len0 = 0; + unsigned long mem_len, db_len = 0; struct pci_dev *pdev = adapter->pdev; int pci_func = adapter->ahw.pci_func; + struct netxen_hardware_context *ahw = &adapter->ahw; int err = 0; @@ -630,24 +628,40 @@ netxen_setup_pci_map(struct netxen_adapter *adapter) /* 128 Meg of memory */ if (mem_len == NETXEN_PCI_128MB_SIZE) { - mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); - mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, + + ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE); + ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_SIZE); - mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, + ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); - pci_len0 = FIRST_PAGE_GROUP_SIZE; + if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL || + ahw->pci_base2 == NULL) { + dev_err(&pdev->dev, "failed to map PCI bar 0\n"); + err = -EIO; + goto err_out; + } + + ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE; + } else if (mem_len == NETXEN_PCI_32MB_SIZE) { - mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); - mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - + + ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); + ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); + if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) { + dev_err(&pdev->dev, "failed to map PCI bar 0\n"); + err = -EIO; + goto err_out; + } + } else if (mem_len == NETXEN_PCI_2MB_SIZE) { - mem_ptr0 = pci_ioremap_bar(pdev, 0); - if (mem_ptr0 == NULL) { + ahw->pci_base0 = pci_ioremap_bar(pdev, 0); + if (ahw->pci_base0 == NULL) { dev_err(&pdev->dev, "failed to map PCI bar 0\n"); return -EIO; } - pci_len0 = mem_len; + ahw->pci_len0 = mem_len; } else { return -EIO; } @@ -656,11 +670,6 @@ netxen_setup_pci_map(struct netxen_adapter *adapter) dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); - adapter->ahw.pci_base0 = mem_ptr0; - adapter->ahw.pci_len0 = pci_len0; - adapter->ahw.pci_base1 = mem_ptr1; - adapter->ahw.pci_base2 = mem_ptr2; - if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) { adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter, NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func))); -- cgit v1.2.3 From 48c11a59c4c1d9926be34920d45da037516eb7b8 Mon Sep 17 00:00:00 2001 From: Amit Kumar Salecha Date: Fri, 26 Mar 2010 00:30:10 +0000 Subject: netxen: update version to 4.0.73 Signed-off-by: Amit Kumar Salecha Signed-off-by: David S. 
Miller --- drivers/net/netxen/netxen_nic.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index 144d2e880422..0f703838e21a 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h @@ -53,8 +53,8 @@ #define _NETXEN_NIC_LINUX_MAJOR 4 #define _NETXEN_NIC_LINUX_MINOR 0 -#define _NETXEN_NIC_LINUX_SUBVERSION 72 -#define NETXEN_NIC_LINUX_VERSIONID "4.0.72" +#define _NETXEN_NIC_LINUX_SUBVERSION 73 +#define NETXEN_NIC_LINUX_VERSIONID "4.0.73" #define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) #define _major(v) (((v) >> 24) & 0xff) -- cgit v1.2.3 From 65deeed7b34bc5b8d3cbff495e8fa2ae7b563480 Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Wed, 24 Mar 2010 09:35:42 +0000 Subject: ixgbevf: Fix signed/unsigned int error In the Tx mapping function if a DMA error occurred then the unwind of previously mapped sections would improperly check an unsigned int if it was less than zero. Changed the index variable to signed to avoid the error. Signed-off-by: Greg Rose Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ixgbevf/ixgbevf_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c index d6cbd943a6f0..1bbbef3ee3f4 100644 --- a/drivers/net/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ixgbevf/ixgbevf_main.c @@ -2943,9 +2943,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter, struct ixgbevf_tx_buffer *tx_buffer_info; unsigned int len; unsigned int total = skb->len; - unsigned int offset = 0, size, count = 0, i; + unsigned int offset = 0, size, count = 0; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; unsigned int f; + int i; i = tx_ring->next_to_use; -- cgit v1.2.3 From 5809a1ae77721931ca7bd7aeacb37fdabe6f07c0 Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Wed, 24 Mar 2010 09:36:08 +0000 Subject: ixgbe: In SR-IOV mode insert delay before bring the adapter up VFs running in guest VMs do not respond in as timely a manner to PF indication it is going down as they do when running in the host domain. If the adapter is in SR-IOV mode insert a two second delay to guarantee that all VFs have had time to respond to the PF reset. In any case resetting the PF while VFs are active should be discouraged but if it must be done then there will be a two second delay to help synchronize resets among the PF and all the VFs. Signed-off-by: Greg Rose Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ixgbe/ixgbe_main.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index d75c46ff31f6..d2cda9e5963f 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3056,6 +3056,14 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) msleep(1); ixgbe_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. 
+ */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + msleep(2000); ixgbe_up(adapter); clear_bit(__IXGBE_RESETTING, &adapter->state); } -- cgit v1.2.3 From 581d1aa777580c1c22169538ffb46676b13c408e Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Wed, 24 Mar 2010 09:36:27 +0000 Subject: ixgbe: Change where clear_to_send_flag is reset to zero. The clear_to_send flag is being cleared before the call to ping all the VFs. It should be cleared after pinging all the VFs. Signed-off-by: Greg Rose Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ixgbe/ixgbe_main.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index d2cda9e5963f..1066d53e102c 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3244,13 +3244,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter) /* disable receive for all VFs and wait one second */ if (adapter->num_vfs) { - for (i = 0 ; i < adapter->num_vfs; i++) - adapter->vfinfo[i].clear_to_send = 0; - /* ping all the active vfs to let them know we are going down */ ixgbe_ping_all_vfs(adapter); + /* Disable all VFTE/VFRE TX/RX */ ixgbe_disable_tx_rx(adapter); + + /* Mark all the VFs as inactive */ + for (i = 0 ; i < adapter->num_vfs; i++) + adapter->vfinfo[i].clear_to_send = 0; } /* disable receives */ -- cgit v1.2.3 From e0fce6950b822aba7840d82c2d2018f1e1b8276b Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 24 Mar 2010 10:01:45 +0000 Subject: ixgbe: cleanup maximum number of tx queues In the last patch I missed an unnecessary min_t comparison. This patch removes it; the path allocates at most 72 tx queues for 82599 and 24 for 82598, so there is no need for this check. Additionally this sets MAX_[TX|RX]_QUEUES to 72, which is used as the size for the tx/rx_ring arrays. There is no reason to have more tx_rings/rx_rings than num_tx_queues. Signed-off-by: John Fastabend Signed-off-by: Jeff Kirsher Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ixgbe/ixgbe.h | 7 +++++-- drivers/net/ixgbe/ixgbe_main.c | 1 - 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 19e94ee155a2..79c35ae3718c 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -204,14 +204,17 @@ enum ixgbe_ring_f_enum { #define IXGBE_MAX_FDIR_INDICES 64 #ifdef IXGBE_FCOE #define IXGBE_MAX_FCOE_INDICES 8 +#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) +#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) +#else +#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES +#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES #endif /* IXGBE_FCOE */ struct ixgbe_ring_feature { int indices; int mask; } ____cacheline_internodealigned_in_smp; -#define MAX_RX_QUEUES 128 -#define MAX_TX_QUEUES 128 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ ?
8 : 1) diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 1066d53e102c..208fb4a97217 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -6061,7 +6061,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, indices += min_t(unsigned int, num_possible_cpus(), IXGBE_MAX_FCOE_INDICES); #endif - indices = min_t(unsigned int, indices, MAX_TX_QUEUES); netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); if (!netdev) { err = -ENOMEM; -- cgit v1.2.3 From a7551b75fe47fb6fb70f679935845e741c5e0855 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Wed, 24 Mar 2010 10:02:04 +0000 Subject: ixgbe: Don't allow user buffer count to exceed 256 If the user buffer count was 256, the shift would place a 1 in the offset region, leading to errors. It also overwrites the user buffer list. This patch makes sure that at most 256 user buffers are allowed for DDP and the buffer count is masked properly such that it doesn't overwrite the offset when shifting the bits. Signed-off-by: Robert Love Signed-off-by: Yi Zou Signed-off-by: Frank Zhang Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ixgbe/ixgbe_fcoe.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 700cfc0aa1b9..e1978da49e5b 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -202,6 +202,15 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, addr = sg_dma_address(sg); len = sg_dma_len(sg); while (len) { + /* max number of buffers allowed in one DDP context */ + if (j >= IXGBE_BUFFCNT_MAX) { + netif_err(adapter, drv, adapter->netdev, + "xid=%x:%d,%d,%d:addr=%llx " + "not enough descriptors\n", + xid, i, j, dmacount, (u64)addr); + goto out_noddp_free; + } + /* get the offset of length of current buffer */ thisoff = addr & ((dma_addr_t)bufflen - 1); thislen = min((bufflen - thisoff), len); @@ -227,20 +236,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, len -= thislen; addr += thislen; j++; - /* max number of buffers allowed in one DDP context */ - if (j > IXGBE_BUFFCNT_MAX) { - DPRINTK(DRV, ERR, "xid=%x:%d,%d,%d:addr=%llx " - "not enough descriptors\n", - xid, i, j, dmacount, (u64)addr); - goto out_noddp_free; - } } } /* only the last buffer may have non-full bufflen */ lastsize = thisoff + thislen; fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); - fcbuff |= (j << IXGBE_FCBUFF_BUFFCNT_SHIFT); + fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); fcbuff |= (IXGBE_FCBUFF_VALID); -- cgit v1.2.3 From ca77cd59d28456b4061afa5254972ec47fa8baf5 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Wed, 24 Mar 2010 12:45:00 +0000 Subject: ixgbe: Priority tag FIP frames Currently FIP (FCoE Initialization Protocol) frames are going untagged. This causes various problems with FCFs (switches) that have negotiated a priority over dcbx. This patch tags FIP frames with the same priority as the FCoE frames. Signed-off-by: Robert Love Signed-off-by: Chris Leech Signed-off-by: Jeff Kirsher Signed-off-by: David S.
Miller --- drivers/net/ixgbe/ixgbe_main.c | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 208fb4a97217..0c553f6cb534 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -5648,7 +5648,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) #ifdef IXGBE_FCOE if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && - (skb->protocol == htons(ETH_P_FCOE))) { + ((skb->protocol == htons(ETH_P_FCOE)) || + (skb->protocol == htons(ETH_P_FIP)))) { txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); txq += adapter->ring_feature[RING_F_FCOE].mask; return txq; @@ -5695,18 +5696,25 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, tx_ring = adapter->tx_ring[skb->queue_mapping]; - if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && - (skb->protocol == htons(ETH_P_FCOE))) { - tx_flags |= IXGBE_TX_FLAGS_FCOE; #ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { #ifdef CONFIG_IXGBE_DCB - tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK - << IXGBE_TX_FLAGS_VLAN_SHIFT); - tx_flags |= ((adapter->fcoe.up << 13) - << IXGBE_TX_FLAGS_VLAN_SHIFT); -#endif + /* for FCoE with DCB, we force the priority to what + * was specified by the switch */ + if ((skb->protocol == htons(ETH_P_FCOE)) || + (skb->protocol == htons(ETH_P_FIP))) { + tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK + << IXGBE_TX_FLAGS_VLAN_SHIFT); + tx_flags |= ((adapter->fcoe.up << 13) + << IXGBE_TX_FLAGS_VLAN_SHIFT); + } #endif + /* flag for FCoE offloads */ + if (skb->protocol == htons(ETH_P_FCOE)) + tx_flags |= IXGBE_TX_FLAGS_FCOE; } +#endif + /* four things can cause us to need a context descriptor */ if (skb_is_gso(skb) || (skb->ip_summed == CHECKSUM_PARTIAL) || -- cgit v1.2.3 From af06393bbde6e8d474622a0517cffc662676e3fe Mon Sep 17 00:00:00 2001 From: Chris Leech Date: Wed, 24 Mar 2010 12:45:21 +0000 Subject: ixgbe: filter FIP frames into the FCoE offload queues During FCF solicitation, the switch is supposed to pad the solicited advertisement out to the endpoints specified maximum FCoE frame size. That means that we need to receive FIP frames that are larger than the standard MTU. To make sure the receive queue is configured correctly, we should be filtering FIP traffic into the FCoE queues. Signed-off-by: Chris Leech Signed-off-by: Jeff Kirsher Signed-off-by: David S. 
Miller --- drivers/net/ixgbe/ixgbe_fcoe.c | 15 +++++++++++++++ drivers/net/ixgbe/ixgbe_type.h | 1 + 2 files changed, 16 insertions(+) diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index e1978da49e5b..9276d5965b0d 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -522,6 +522,9 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) /* Enable L2 eth type filter for FCoE */ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN)); + /* Enable L2 eth type filter for FIP */ + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), + (ETH_P_FIP | IXGBE_ETQF_FILTER_EN)); if (adapter->ring_feature[RING_F_FCOE].indices) { /* Use multiple rx queues for FCoE by redirection table */ for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { @@ -532,6 +535,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) } IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); + fcoe_i = f->mask; + fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), + IXGBE_ETQS_QUEUE_EN | + (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); } else { /* Use single rx queue for FCoE */ fcoe_i = f->mask; @@ -541,6 +550,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) IXGBE_ETQS_QUEUE_EN | (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); } + /* send FIP frames to the first FCoE queue */ + fcoe_i = f->mask; + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), + IXGBE_ETQS_QUEUE_EN | + (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCOELLI | diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 0ed5ab37cc53..4ec6dc1a5b75 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -1298,6 +1298,7 @@ #define IXGBE_ETQF_FILTER_BCN 1 #define IXGBE_ETQF_FILTER_FCOE 2 #define IXGBE_ETQF_FILTER_1588 3 +#define IXGBE_ETQF_FILTER_FIP 4 /* VLAN Control Bit Masks */ #define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ #define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ -- cgit v1.2.3 From a6d36d5689b1806a3365c909192e9f03a43a632b Mon Sep 17 00:00:00 2001 From: Ben Menchaca Date: Wed, 24 Mar 2010 05:05:02 +0000 Subject: gianfar: fix undo of reserve() Fix undo of reserve() before RX recycle gfar_new_skb reserve()s space in the SKB to align it. If an error occurs, and the skb needs to be returned to the RX recycle queue, the current code attempts to reset head, but did not reset tail. This patch remembers the alignment amount, and reverses the reserve() when needed. Signed-off-by: Ben Menchaca Signed-off-by: David S. 
Miller --- drivers/net/gianfar.c | 5 +++-- drivers/net/gianfar.h | 6 ++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index b6715553cf17..669de028d44f 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -2393,6 +2393,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev) * as many bytes as needed to align the data properly */ skb_reserve(skb, alignamount); + GFAR_CB(skb)->alignamount = alignamount; return skb; } @@ -2533,13 +2534,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) newskb = skb; else if (skb) { /* - * We need to reset ->data to what it + * We need to un-reserve() the skb to what it * was before gfar_new_skb() re-aligned * it to an RXBUF_ALIGNMENT boundary * before we put the skb back on the * recycle list. */ - skb->data = skb->head + NET_SKB_PAD; + skb_reserve(skb, -GFAR_CB(skb)->alignamount); __skb_queue_head(&priv->rx_recycle, skb); } } else { diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index 3d72dc43dca5..17d25e714236 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -566,6 +566,12 @@ struct rxfcb { u16 vlctl; /* VLAN control word */ }; +struct gianfar_skb_cb { + int alignamount; +}; + +#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb)) + struct rmon_mib { u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */ -- cgit v1.2.3 From ac90a149361a331f697d5aa500bedcff22054669 Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Fri, 27 Mar 2009 17:23:32 +0000 Subject: tulip: Fix null dereference in uli526x_rx_packet() Acked-by: Grant Grundler Signed-off-by: David S. Miller --- drivers/net/tulip/uli526x.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index 0ab05af237e5..90be57bad39d 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c @@ -851,13 +851,15 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info if ( !(rdes0 & 0x8000) || ((db->cr6_data & CR6_PM) && (rxlen>6)) ) { + struct sk_buff *new_skb = NULL; + skb = rxptr->rx_skb_ptr; /* Good packet, send to upper layer */ /* Shorst packet used new SKB */ - if ( (rxlen < RX_COPY_SIZE) && - ( (skb = dev_alloc_skb(rxlen + 2) ) - != NULL) ) { + if ((rxlen < RX_COPY_SIZE) && + ((new_skb = dev_alloc_skb(rxlen + 2) != NULL))) { + skb = new_skb; /* size less than COPY_SIZE, allocate a rxlen SKB */ skb_reserve(skb, 2); /* 16byte align */ memcpy(skb_put(skb, rxlen), -- cgit v1.2.3 From 4b97efdf392563bf03b4917a0b5add2df65de39a Mon Sep 17 00:00:00 2001 From: Patrick McHardy Date: Fri, 26 Mar 2010 20:27:49 -0700 Subject: net: fix netlink address dumping in IPv4/IPv6 When a dump is interrupted at the last device in a hash chain and then continued, "idx" won't get incremented past s_idx, so s_ip_idx is not reset when moving on to the next device. This means of all following devices only the last n - s_ip_idx addresses are dumped. 
Tested-by: Pawel Staszewski Signed-off-by: Patrick McHardy --- net/ipv4/devinet.c | 2 +- net/ipv6/addrconf.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 51ca946e3392..3feb2b390308 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1194,7 +1194,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) hlist_for_each_entry_rcu(dev, node, head, index_hlist) { if (idx < s_idx) goto cont; - if (idx > s_idx) + if (h > s_h || idx > s_idx) s_ip_idx = 0; in_dev = __in_dev_get_rcu(dev); if (!in_dev) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3381b4317c27..7e567ae5eaab 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3610,7 +3610,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, hlist_for_each_entry_rcu(dev, node, head, index_hlist) { if (idx < s_idx) goto cont; - if (idx > s_idx) + if (h > s_h || idx > s_idx) s_ip_idx = 0; ip_idx = 0; if ((idev = __in6_dev_get(dev)) == NULL) -- cgit v1.2.3 From b35ecb5d404c00f2420221ccbb1bbba1139353a4 Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Wed, 24 Mar 2010 07:43:17 +0000 Subject: ipv4: Cleanup struct net dereference in rt_intern_hash There's no need to get it 3 times, and gcc isn't smart enough to figure this out by itself. This is just a cleanup before the fix (next patch). Signed-off-by: Pavel Emelyanov Signed-off-by: David S. Miller --- net/ipv4/route.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 54fd68c14c87..124af1605d10 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1212,11 +1212,11 @@ restart: slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) { struct net *net = dev_net(rt->u.dst.dev); int num = ++net->ipv4.current_rt_cache_rebuild_count; - if (!rt_caching(dev_net(rt->u.dst.dev))) { + if (!rt_caching(net)) { printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", rt->u.dst.dev->name, num); } - rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev)); + rt_emergency_hash_rebuild(net); } } -- cgit v1.2.3 From 6a2bad70d546cf30a46bc6d9ec0cb9a0891a38eb Mon Sep 17 00:00:00 2001 From: Pavel Emelyanov Date: Wed, 24 Mar 2010 21:51:22 +0000 Subject: ipv4: Restart rt_intern_hash after emergency rebuild (v2) The rebuild changes the genid, which in turn is used in the hash calculation. Thus, if we don't restart and go on with the insertion, the rt will end up in the wrong chain. (Fixed Neil's comment about the index passed into rt_intern_hash.) Signed-off-by: Pavel Emelyanov Reviewed-by: Neil Horman Signed-off-by: David S.
Miller --- net/ipv4/route.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 124af1605d10..d413b57be9b3 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1097,7 +1097,7 @@ static int slow_chain_length(const struct rtable *head) } static int rt_intern_hash(unsigned hash, struct rtable *rt, - struct rtable **rp, struct sk_buff *skb) + struct rtable **rp, struct sk_buff *skb, int ifindex) { struct rtable *rth, **rthp; unsigned long now; @@ -1217,6 +1217,11 @@ restart: rt->u.dst.dev->name, num); } rt_emergency_hash_rebuild(net); + spin_unlock_bh(rt_hash_lock_addr(hash)); + + hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src, + ifindex, rt_genid(net)); + goto restart; } } @@ -1477,7 +1482,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, &netevent); rt_del(hash, rth); - if (!rt_intern_hash(hash, rt, &rt, NULL)) + if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif)) ip_rt_put(rt); goto do_next; } @@ -1931,7 +1936,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, in_dev_put(in_dev); hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); - return rt_intern_hash(hash, rth, NULL, skb); + return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex); e_nobufs: in_dev_put(in_dev); @@ -2098,7 +2103,7 @@ static int ip_mkroute_input(struct sk_buff *skb, /* put it into the cache */ hash = rt_hash(daddr, saddr, fl->iif, rt_genid(dev_net(rth->u.dst.dev))); - return rt_intern_hash(hash, rth, NULL, skb); + return rt_intern_hash(hash, rth, NULL, skb, fl->iif); } /* @@ -2255,7 +2260,7 @@ local_input: } rth->rt_type = res.type; hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); - err = rt_intern_hash(hash, rth, NULL, skb); + err = rt_intern_hash(hash, rth, NULL, skb, fl.iif); goto done; no_route: @@ -2502,7 +2507,7 @@ static int ip_mkroute_output(struct rtable **rp, if (err == 0) { hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, rt_genid(dev_net(dev_out))); - err = rt_intern_hash(hash, rth, rp, NULL); + err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif); } return err; -- cgit v1.2.3 From a08af745e4c711d22aeadc2adade36958fe03ce8 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Thu, 25 Mar 2010 12:11:48 +0000 Subject: igbvf: do not modify tx_queue_len on link speed change Previously the driver tweaked txqueuelen to avoid false Tx hang reports seen at half duplex. This had the effect of overriding user set values on link change/reset. Testing shows that adjusting only the timeout factor is sufficient to prevent Tx hang reports at half duplex. Based on e1000e patch by Franco Fichtner CC: Franco Fichtner Signed-off-by: Emil Tantilov Signed-off-by: Jeff Kirsher Signed-off-by: David S. 
Miller --- drivers/net/igbvf/igbvf.h | 1 - drivers/net/igbvf/netdev.c | 11 +---------- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h index a1774b29d222..debeee2dc717 100644 --- a/drivers/net/igbvf/igbvf.h +++ b/drivers/net/igbvf/igbvf.h @@ -198,7 +198,6 @@ struct igbvf_adapter { struct igbvf_ring *tx_ring /* One per active queue */ ____cacheline_aligned_in_smp; - unsigned long tx_queue_len; unsigned int restart_queue; u32 txd_cmd; diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index a77afd8a14bb..b41037ed8083 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c @@ -1304,8 +1304,6 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter) /* enable Report Status bit */ adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; - - adapter->tx_queue_len = adapter->netdev->tx_queue_len; } /** @@ -1524,7 +1522,6 @@ void igbvf_down(struct igbvf_adapter *adapter) del_timer_sync(&adapter->watchdog_timer); - netdev->tx_queue_len = adapter->tx_queue_len; netif_carrier_off(netdev); /* record the stats before reset*/ @@ -1857,21 +1854,15 @@ static void igbvf_watchdog_task(struct work_struct *work) &adapter->link_duplex); igbvf_print_link_info(adapter); - /* - * tweak tx_queue_len according to speed/duplex - * and adjust the timeout factor - */ - netdev->tx_queue_len = adapter->tx_queue_len; + /* adjust timeout factor according to speed/duplex */ adapter->tx_timeout_factor = 1; switch (adapter->link_speed) { case SPEED_10: txb2b = 0; - netdev->tx_queue_len = 10; adapter->tx_timeout_factor = 16; break; case SPEED_100: txb2b = 0; - netdev->tx_queue_len = 100; /* maybe add some timeout factor ? */ break; } -- cgit v1.2.3 From f49c57e141c7f53353e4265a31dc2324e6215037 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Wed, 24 Mar 2010 12:55:02 +0000 Subject: e1000e: do not modify tx_queue_len on link speed change Previously the driver tweaked txqueuelen to avoid false Tx hang reports seen at half duplex. This had the effect of overriding user set values on link change/reset. Testing shows that adjusting only the timeout factor is sufficient to prevent Tx hang reports at half duplex. This patch removes all instances of tx_queue_len in the driver. Originally reported and patched by Franco Fichtner CC: Franco Fichtner Signed-off-by: Emil Tantilov Signed-off-by: Jeff Kirsher Signed-off-by: David S. 
Miller --- drivers/net/e1000e/e1000.h | 1 - drivers/net/e1000e/netdev.c | 11 +---------- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index c2ec095d2163..118bdf483593 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -279,7 +279,6 @@ struct e1000_adapter { struct napi_struct napi; - unsigned long tx_queue_len; unsigned int restart_queue; u32 txd_cmd; diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 88d54d3efcef..e1cceb606576 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -2289,8 +2289,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ew32(TCTL, tctl); e1000e_config_collision_dist(hw); - - adapter->tx_queue_len = adapter->netdev->tx_queue_len; } /** @@ -2877,7 +2875,6 @@ void e1000e_down(struct e1000_adapter *adapter) del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); - netdev->tx_queue_len = adapter->tx_queue_len; netif_carrier_off(netdev); adapter->link_speed = 0; adapter->link_duplex = 0; @@ -3588,21 +3585,15 @@ static void e1000_watchdog_task(struct work_struct *work) "link gets many collisions.\n"); } - /* - * tweak tx_queue_len according to speed/duplex - * and adjust the timeout factor - */ - netdev->tx_queue_len = adapter->tx_queue_len; + /* adjust timeout factor according to speed/duplex */ adapter->tx_timeout_factor = 1; switch (adapter->link_speed) { case SPEED_10: txb2b = 0; - netdev->tx_queue_len = 10; adapter->tx_timeout_factor = 16; break; case SPEED_100: txb2b = 0; - netdev->tx_queue_len = 100; adapter->tx_timeout_factor = 10; break; } -- cgit v1.2.3 From 71c5c1595c04852d6fbf3c4882b47b30b61a4d32 Mon Sep 17 00:00:00 2001 From: Brandon L Black Date: Fri, 26 Mar 2010 16:18:03 +0000 Subject: net: Add MSG_WAITFORONE flag to recvmmsg Add new flag MSG_WAITFORONE for the recvmmsg() syscall. When this flag is specified for a blocking socket, recvmmsg() will only block until at least 1 packet is available. The default behavior is to block until all vlen packets are available. This flag has no effect on non-blocking sockets or when used in combination with MSG_DONTWAIT. Signed-off-by: Brandon L Black Acked-by: Ulrich Drepper Acked-by: Eric Dumazet Acked-by: Arnaldo Carvalho de Melo Signed-off-by: David S. 
Miller --- include/linux/socket.h | 1 + net/socket.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/include/linux/socket.h b/include/linux/socket.h index 7b3aae2052a6..354cc5617f8b 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -255,6 +255,7 @@ struct ucred { #define MSG_ERRQUEUE 0x2000 /* Fetch message from error queue */ #define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */ #define MSG_MORE 0x8000 /* Sender will send more */ +#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */ #define MSG_EOF MSG_FIN diff --git a/net/socket.c b/net/socket.c index 769c386bd428..f55ffe9f8c87 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2135,6 +2135,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, break; ++datagrams; + /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */ + if (flags & MSG_WAITFORONE) + flags |= MSG_DONTWAIT; + if (timeout) { ktime_get_ts(timeout); *timeout = timespec_sub(end_time, *timeout); -- cgit v1.2.3 From c0e4d4bad4e8cf0aa787a3045392f949d76b5886 Mon Sep 17 00:00:00 2001 From: wzt wzt Date: Thu, 25 Mar 2010 20:12:59 +0000 Subject: benet: Fix compile warnnings in drivers/net/benet/be_ethtool.c Fix the following warnings: be_ethtool.c:493: warning: integer constant is too large for 'long' type be_ethtool.c:493: warning: integer constant is too large for 'long' type Signed-off-by: Zhitong Wang Acked-by: Ajit Khaparde Signed-off-by: David S. Miller --- drivers/net/benet/be_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index 9560d48944ab..51e1065e7897 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c @@ -490,7 +490,7 @@ be_test_ddr_dma(struct be_adapter *adapter) { int ret, i; struct be_dma_mem ddrdma_cmd; - u64 pattern[2] = {0x5a5a5a5a5a5a5a5a, 0xa5a5a5a5a5a5a5a5}; + u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL}; ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size, -- cgit v1.2.3 From e017b60316468f21a63bdd4affefaf81a7f988fd Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 25 Mar 2010 17:15:06 +0000 Subject: igb: use correct bits to identify if managability is enabled igb was previously checking the wrong bits in the MANC register to determine if managability was enabled. As a result it was incorrectly powering down and resetting the phy when it didn't need to. Signed-off-by: Alexander Duyck Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/igb/e1000_mac.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c index 2a8a886b37eb..be8d010e4021 100644 --- a/drivers/net/igb/e1000_mac.c +++ b/drivers/net/igb/e1000_mac.c @@ -1367,7 +1367,8 @@ out: * igb_enable_mng_pass_thru - Enable processing of ARP's * @hw: pointer to the HW structure * - * Verifies the hardware needs to allow ARPs to be processed by the host. + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. 
**/ bool igb_enable_mng_pass_thru(struct e1000_hw *hw) { @@ -1380,8 +1381,7 @@ bool igb_enable_mng_pass_thru(struct e1000_hw *hw) manc = rd32(E1000_MANC); - if (!(manc & E1000_MANC_RCV_TCO_EN) || - !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + if (!(manc & E1000_MANC_RCV_TCO_EN)) goto out; if (hw->mac.arc_subsystem_valid) { -- cgit v1.2.3 From e7d481a6f3c13041446b7bb8f98ab861460076a3 Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Thu, 25 Mar 2010 17:06:48 +0000 Subject: ixgbe: Do not run all Diagnostic offline tests when VFs are active When running the offline diagnostic tests check to see if any VFs are online. If so then only run the link test. This is necessary because the VFs running in guest VMs aren't aware of when the PF is taken offline for a diagnostic test. Also put a message to the system log telling the system administrator to take the VFs offline manually if (s)he wants to run a full diagnostic. Return 1 on each of the tests not run to alert the user of the condition. Signed-off-by: Greg Rose Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ixgbe/ixgbe_ethtool.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 7949a446e4c7..1959ef76c962 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -1853,6 +1853,26 @@ static void ixgbe_diag_test(struct net_device *netdev, if (ixgbe_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + int i; + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) { + netdev_warn(netdev, "%s", + "offline diagnostic is not " + "supported when VFs are " + "present\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__IXGBE_TESTING, + &adapter->state); + goto skip_ol_tests; + } + } + } + if (if_running) /* indicate we're in test mode */ dev_close(netdev); @@ -1908,6 +1928,7 @@ skip_loopback: clear_bit(__IXGBE_TESTING, &adapter->state); } +skip_ol_tests: msleep_interruptible(4 * 1000); } -- cgit v1.2.3 From 7438189baa0a2fe30084bdc97e3d540ebc5444f3 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Thu, 25 Mar 2010 23:45:35 +0000 Subject: net: ipmr/ip6mr: prevent out-of-bounds vif_table access When cache is unresolved, c->mf[6]c_parent is set to 65535 and minvif, maxvif are not initialized, hence we must avoid to parse IIF and OIF. A second problem can happen when the user dumps a cache entry where a VIF, that was referenced at creation time, has been removed. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- net/ipv4/ipmr.c | 11 +++++++---- net/ipv6/ip6mr.c | 11 +++++++---- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 0b9d03c54dc3..d0a6092a67be 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1616,17 +1616,20 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm) int ct; struct rtnexthop *nhp; struct net *net = mfc_net(c); - struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev; u8 *b = skb_tail_pointer(skb); struct rtattr *mp_head; - if (dev) - RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); + /* If cache is unresolved, don't try to parse IIF and OIF */ + if (c->mfc_parent > MAXVIFS) + return -ENOENT; + + if (VIF_EXISTS(net, c->mfc_parent)) + RTA_PUT(skb, RTA_IIF, 4, &net->ipv4.vif_table[c->mfc_parent].dev->ifindex); mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { - if (c->mfc_un.res.ttls[ct] < 255) { + if (VIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) { if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) goto rtattr_failure; nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 23e4ac0cc30e..27acfb58650a 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1695,17 +1695,20 @@ ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm) int ct; struct rtnexthop *nhp; struct net *net = mfc6_net(c); - struct net_device *dev = net->ipv6.vif6_table[c->mf6c_parent].dev; u8 *b = skb_tail_pointer(skb); struct rtattr *mp_head; - if (dev) - RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex); + /* If cache is unresolved, don't try to parse IIF and OIF */ + if (c->mf6c_parent > MAXMIFS) + return -ENOENT; + + if (MIF_EXISTS(net, c->mf6c_parent)) + RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex); mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0)); for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) { - if (c->mfc_un.res.ttls[ct] < 255) { + if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) { if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) goto rtattr_failure; nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); -- cgit v1.2.3 From 39ca5f033bb2ea18877632809185268eebbb37a9 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Fri, 26 Mar 2010 11:25:58 +0000 Subject: e1000: do not modify tx_queue_len on link speed change Previously the driver tweaked txqueuelen to avoid false Tx hang reports seen at half duplex. This had the effect of overriding user set values on link change/reset. Testing shows that adjusting only the timeout factor is sufficient to prevent Tx hang reports at half duplex. This patch removes all instances of tx_queue_len in the driver. Based on e1000e patch by Franco Fichtner CC: Franco Fichtner Signed-off-by: Emil Tantilov Acked-by: Jesse Brandeburg Signed-off-by: Jeff Kirsher Signed-off-by: David S. 
Miller --- drivers/net/e1000/e1000.h | 1 - drivers/net/e1000/e1000_main.c | 9 +-------- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 9902b33b7160..2f29c2131851 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h @@ -261,7 +261,6 @@ struct e1000_adapter { /* TX */ struct e1000_tx_ring *tx_ring; /* One per active queue */ unsigned int restart_queue; - unsigned long tx_queue_len; u32 txd_cmd; u32 tx_int_delay; u32 tx_abs_int_delay; diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 8be6faee43e6..b15ece26ed84 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -383,8 +383,6 @@ static void e1000_configure(struct e1000_adapter *adapter) adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); } - - adapter->tx_queue_len = netdev->tx_queue_len; } int e1000_up(struct e1000_adapter *adapter) @@ -503,7 +501,6 @@ void e1000_down(struct e1000_adapter *adapter) del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); - netdev->tx_queue_len = adapter->tx_queue_len; adapter->link_speed = 0; adapter->link_duplex = 0; netif_carrier_off(netdev); @@ -2316,19 +2313,15 @@ static void e1000_watchdog(unsigned long data) E1000_CTRL_RFCE) ? "RX" : ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); - /* tweak tx_queue_len according to speed/duplex - * and adjust the timeout factor */ - netdev->tx_queue_len = adapter->tx_queue_len; + /* adjust timeout factor according to speed/duplex */ adapter->tx_timeout_factor = 1; switch (adapter->link_speed) { case SPEED_10: txb2b = false; - netdev->tx_queue_len = 10; adapter->tx_timeout_factor = 16; break; case SPEED_100: txb2b = false; - netdev->tx_queue_len = 100; /* maybe add some timeout factor ? */ break; } -- cgit v1.2.3 From 44ebb95290afcc687511ad3f7fd6434e867c270a Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Fri, 26 Mar 2010 16:27:55 +0000 Subject: drivers/net: Fix continuation lines Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- drivers/net/atlx/atl1.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 9ba547069db3..0ebd8208f606 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -84,7 +84,7 @@ #define ATLX_DRIVER_VERSION "2.1.3" MODULE_AUTHOR("Xiong Huang , \ - Chris Snook , Jay Cliburn "); +Chris Snook , Jay Cliburn "); MODULE_LICENSE("GPL"); MODULE_VERSION(ATLX_DRIVER_VERSION); -- cgit v1.2.3 From a2fd940f4cff74b932728bd6ca12848da21a0234 Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Thu, 25 Mar 2010 14:49:05 +0000 Subject: bonding: fix broken multicast with round-robin mode Round-robin (mode 0) does nothing to ensure that any multicast traffic originally destined for the host will continue to arrive at the host when the link that sent the IGMP join or membership report goes down. One of the benefits of absolute round-robin transmit. Keeping track of subscribed multicast groups for each slave did not seem like a good use of resources, so I decided to simply send on the curr_active slave of the bond (typically the first enslaved device that is up). This makes failover management simple as IGMP membership reports only need to be sent when the curr_active_slave changes. I tested this patch and it appears to work as expected. Originally reported by Lon Hohberger . 
Signed-off-by: Andy Gospodarek CC: Lon Hohberger CC: Jay Vosburgh Signed-off-by: Jay Vosburgh Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 40 ++++++++++++++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 8 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 430c02267d7e..5b92fbff431d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1235,6 +1235,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) write_lock_bh(&bond->curr_slave_lock); } } + + /* resend IGMP joins since all were sent on curr_active_slave */ + if (bond->params.mode == BOND_MODE_ROUNDROBIN) { + bond_resend_igmp_join_requests(bond); + } } /** @@ -4138,22 +4143,41 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *start_at; int i, slave_no, res = 1; + struct iphdr *iph = ip_hdr(skb); read_lock(&bond->lock); if (!BOND_IS_OK(bond)) goto out; - /* - * Concurrent TX may collide on rr_tx_counter; we accept that - * as being rare enough not to justify using an atomic op here + * Start with the curr_active_slave that joined the bond as the + * default for sending IGMP traffic. For failover purposes one + * needs to maintain some consistency for the interface that will + * send the join/membership reports. The curr_active_slave found + * will send all of this type of traffic. */ - slave_no = bond->rr_tx_counter++ % bond->slave_cnt; + if ((iph->protocol == htons(IPPROTO_IGMP)) && + (skb->protocol == htons(ETH_P_IP))) { - bond_for_each_slave(bond, slave, i) { - slave_no--; - if (slave_no < 0) - break; + read_lock(&bond->curr_slave_lock); + slave = bond->curr_active_slave; + read_unlock(&bond->curr_slave_lock); + + if (!slave) + goto out; + } else { + /* + * Concurrent TX may collide on rr_tx_counter; we accept + * that as being rare enough not to justify using an + * atomic op here. + */ + slave_no = bond->rr_tx_counter++ % bond->slave_cnt; + + bond_for_each_slave(bond, slave, i) { + slave_no--; + if (slave_no < 0) + break; + } } start_at = slave; -- cgit v1.2.3 From 1546a713ae1f066f83469cdd99ebdf500d6a65e4 Mon Sep 17 00:00:00 2001 From: Ken Kawasaki Date: Sat, 27 Mar 2010 10:55:37 +0000 Subject: pcnet_cs: add new id pcnet_cs: *add new id (Allied Telesis LM33-PCM-T Lan&Modem multifunction card) *use PROD_ID for LA-PCM.(because LA-PCM and LM33-PCM-T use the same MANF_ID). Signed-off-by: Ken Kawasaki Signed-off-by: David S. 
Miller --- drivers/net/pcmcia/pcnet_cs.c | 3 ++- drivers/serial/serial_cs.c | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 776cad2f5715..1028fcb91a28 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -1549,6 +1549,7 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101), PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab), PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4), + PCMCIA_PFC_DEVICE_PROD_ID12(0, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e), PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff), PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033), @@ -1740,7 +1741,7 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), - PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("Allied Telesis,K.K", "Ethernet LAN Card", 0x2ad62f3c, 0x9fd2f0a2, "cis/LA-PCM.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"), diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index e91db4b38012..175d202ab37e 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c @@ -745,6 +745,7 @@ static struct pcmcia_device_id serial_ids[] = { PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29), PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719), PCMCIA_PFC_DEVICE_PROD_ID12(1, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4), + PCMCIA_PFC_DEVICE_PROD_ID12(1, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e), PCMCIA_PFC_DEVICE_PROD_ID12(1, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff), PCMCIA_PFC_DEVICE_PROD_ID12(1, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c), PCMCIA_PFC_DEVICE_PROD_ID12(1, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), -- cgit v1.2.3 From 78f1cd02457252e1ffbc6caa44a17424a45286b8 Mon Sep 17 00:00:00 2001 From: Francois Romieu Date: Sat, 27 Mar 2010 19:35:46 -0700 Subject: r8169: fix broken register writes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is quite similar to b39fe41f481d20c201012e4483e76c203802dda7 though said registers are not even documented as 64-bit registers - as opposed to the initial TxDescStartAddress ones - but as single bytes which must be combined into 32 bits at the MMIO read/write level before being merged into a 64 bit logical entity. Credits go to Ben Hutchings for the MAR registers (aka "multicast is broken for ages on ARM) and to Timo Teräs for the MAC registers. Signed-off-by: Francois Romieu Signed-off-by: David S. 
Miller --- drivers/net/r8169.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index b93fd23b9f0d..7193afc00e47 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -2820,8 +2820,8 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) spin_lock_irq(&tp->lock); RTL_W8(Cfg9346, Cfg9346_Unlock); - RTL_W32(MAC0, low); RTL_W32(MAC4, high); + RTL_W32(MAC0, low); RTL_W8(Cfg9346, Cfg9346_Lock); spin_unlock_irq(&tp->lock); @@ -4747,8 +4747,8 @@ static void rtl_set_rx_mode(struct net_device *dev) mc_filter[1] = swab32(data); } - RTL_W32(MAR0 + 0, mc_filter[0]); RTL_W32(MAR0 + 4, mc_filter[1]); + RTL_W32(MAR0 + 0, mc_filter[0]); RTL_W32(RxConfig, tmp); -- cgit v1.2.3 From 7855f761998893bb6bf861d55df95036fc9e36ab Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 28 Mar 2010 18:56:34 -0700 Subject: tulip: Add missing parens. As reported by Stephen Rothwell. drivers/net/tulip/uli526x.c: In function 'uli526x_rx_packet': drivers/net/tulip/uli526x.c:861: warning: assignment makes pointer from integer without a cast Signed-off-by: David S. Miller --- drivers/net/tulip/uli526x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index 90be57bad39d..a4f09d490531 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c @@ -858,7 +858,7 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info /* Good packet, send to upper layer */ /* Shorst packet used new SKB */ if ((rxlen < RX_COPY_SIZE) && - ((new_skb = dev_alloc_skb(rxlen + 2) != NULL))) { + (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) { skb = new_skb; /* size less than COPY_SIZE, allocate a rxlen SKB */ skb_reserve(skb, 2); /* 16byte align */ -- cgit v1.2.3 From 54c1a859efd9fd6cda05bc700315ba2519c14eba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?YOSHIFUJI=20Hideaki=20/=20=E5=90=89=E8=97=A4=E8=8B=B1?= =?UTF-8?q?=E6=98=8E?= Date: Sun, 28 Mar 2010 07:15:45 +0000 Subject: ipv6: Don't drop cache route entry unless timer actually expired. This is ipv6 variant of the commit 5e016cbf6.. ("ipv4: Don't drop redirected route cache entry unless PTMU actually expired") by Guenter Roeck . Remove cache route entry in ipv6_negative_advice() only if the timer is expired. Signed-off-by: YOSHIFUJI Hideaki Signed-off-by: David S. Miller --- net/ipv6/route.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 7fcb0e5d1213..0d7713c5c206 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -890,12 +890,17 @@ static struct dst_entry *ip6_negative_advice(struct dst_entry *dst) struct rt6_info *rt = (struct rt6_info *) dst; if (rt) { - if (rt->rt6i_flags & RTF_CACHE) - ip6_del_rt(rt); - else + if (rt->rt6i_flags & RTF_CACHE) { + if (rt6_check_expired(rt)) { + ip6_del_rt(rt); + dst = NULL; + } + } else { dst_release(dst); + dst = NULL; + } } - return NULL; + return dst; } static void ip6_link_failure(struct sk_buff *skb) -- cgit v1.2.3 From c0cd884af045338476b8e69a61fceb3f34ff22f1 Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Mon, 29 Mar 2010 13:16:02 -0700 Subject: r8169: offical fix for CVE-2009-4537 (overlength frame DMAs) Official patch to fix the r8169 frame length check error. Based on this initial thread: http://marc.info/?l=linux-netdev&m=126202972828626&w=1 This is the official patch to fix the frame length problems in the r8169 driver. 
As noted in the previous thread, while this patch incurs a performance hit on the driver, its possible to improve performance dynamically by updating the mtu and rx_copybreak values at runtime to return performance to what it was for those NICS which are unaffected by the ideosyncracy (if there are any). Summary: A while back Eric submitted a patch for r8169 in which the proper allocated frame size was written to RXMaxSize to prevent the NIC from dmaing too much data. This was done in commit fdd7b4c3302c93f6833e338903ea77245eb510b4. A long time prior to that however, Francois posted 126fa4b9ca5d9d7cb7d46f779ad3bd3631ca387c, which expiclitly disabled the MaxSize setting due to the fact that the hardware behaved in odd ways when overlong frames were received on NIC's supported by this driver. This was mentioned in a security conference recently: http://events.ccc.de/congress/2009/Fahrplan//events/3596.en.html It seems that if we can't enable frame size filtering, then, as Eric correctly noticed, we can find ourselves DMA-ing too much data to a buffer, causing corruption. As a result is seems that we are forced to allocate a frame which is ready to handle a maximally sized receive. This obviously has performance issues with it, so to mitigate that issue, this patch does two things: 1) Raises the copybreak value to the frame allocation size, which should force appropriately sized packets to get allocated on rx, rather than a full new 16k buffer. 2) This patch only disables frame filtering initially (i.e., during the NIC open), changing the MTU results in ring buffer allocation of a size in relation to the new mtu (along with a warning indicating that this is dangerous). Because of item (2), individuals who can't cope with the performance hit (or can otherwise filter frames to prevent the bug), or who have hardware they are sure is unaffected by this issue, can manually lower the copybreak and reset the mtu such that performance is restored easily. Signed-off-by: Neil Horman Signed-off-by: David S. Miller --- drivers/net/r8169.c | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 7193afc00e47..96740051cdcc 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -186,7 +186,12 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = { MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); -static int rx_copybreak = 200; +/* + * we set our copybreak very high so that we don't have + * to allocate 16k frames all the time (see note in + * rtl8169_open() + */ +static int rx_copybreak = 16383; static int use_dac; static struct { u32 msg_enable; @@ -3217,9 +3222,13 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) } static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, - struct net_device *dev) + unsigned int mtu) { - unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; + unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; + + if (max_frame != 16383) + printk(KERN_WARNING "WARNING! Changing of MTU on this NIC" + "May lead to frame reception errors!\n"); tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? 
max_frame : RX_BUF_SIZE; } @@ -3231,7 +3240,17 @@ static int rtl8169_open(struct net_device *dev) int retval = -ENOMEM; - rtl8169_set_rxbufsize(tp, dev); + /* + * Note that we use a magic value here, its wierd I know + * its done because, some subset of rtl8169 hardware suffers from + * a problem in which frames received that are longer than + * the size set in RxMaxSize register return garbage sizes + * when received. To avoid this we need to turn off filtering, + * which is done by setting a value of 16383 in the RxMaxSize register + * and allocating 16k frames to handle the largest possible rx value + * thats what the magic math below does. + */ + rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN); /* * Rx and Tx desscriptors needs 256 bytes alignment. @@ -3884,7 +3903,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) rtl8169_down(dev); - rtl8169_set_rxbufsize(tp, dev); + rtl8169_set_rxbufsize(tp, dev->mtu); ret = rtl8169_init_ring(dev); if (ret < 0) -- cgit v1.2.3 From 368d06f5b0eefcbf37d677d3b65381310a251f03 Mon Sep 17 00:00:00 2001 From: "John W. Linville" Date: Tue, 16 Mar 2010 15:40:59 -0400 Subject: wireless: convert reg_regdb_search_lock to mutex Stanse discovered that kmalloc is being called with GFP_KERNEL while holding this spinlock. The spinlock can be a mutex instead, which also enables the removal of the unlock/lock around the lock/unlock of cfg80211_mutex and the call to set_regdom. Reported-by: Jiri Slaby Cc: stable@kernel.org Signed-off-by: John W. Linville --- net/wireless/reg.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/net/wireless/reg.c b/net/wireless/reg.c index ed89c59bb431..81fcafc60150 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -324,7 +324,7 @@ struct reg_regdb_search_request { }; static LIST_HEAD(reg_regdb_search_list); -static DEFINE_SPINLOCK(reg_regdb_search_lock); +static DEFINE_MUTEX(reg_regdb_search_mutex); static void reg_regdb_search(struct work_struct *work) { @@ -332,7 +332,7 @@ static void reg_regdb_search(struct work_struct *work) const struct ieee80211_regdomain *curdom, *regdom; int i, r; - spin_lock(®_regdb_search_lock); + mutex_lock(®_regdb_search_mutex); while (!list_empty(®_regdb_search_list)) { request = list_first_entry(®_regdb_search_list, struct reg_regdb_search_request, @@ -346,18 +346,16 @@ static void reg_regdb_search(struct work_struct *work) r = reg_copy_regd(®dom, curdom); if (r) break; - spin_unlock(®_regdb_search_lock); mutex_lock(&cfg80211_mutex); set_regdom(regdom); mutex_unlock(&cfg80211_mutex); - spin_lock(®_regdb_search_lock); break; } } kfree(request); } - spin_unlock(®_regdb_search_lock); + mutex_unlock(®_regdb_search_mutex); } static DECLARE_WORK(reg_regdb_work, reg_regdb_search); @@ -375,9 +373,9 @@ static void reg_regdb_query(const char *alpha2) memcpy(request->alpha2, alpha2, 2); - spin_lock(®_regdb_search_lock); + mutex_lock(®_regdb_search_mutex); list_add_tail(&request->list, ®_regdb_search_list); - spin_unlock(®_regdb_search_lock); + mutex_unlock(®_regdb_search_mutex); schedule_work(®_regdb_work); } -- cgit v1.2.3 From c7a00dc73b7185ab2ebd1aa7ce710c7b4edc77a4 Mon Sep 17 00:00:00 2001 From: "John W. Linville" Date: Wed, 17 Mar 2010 11:28:18 -0400 Subject: mac80211: correct typos in "unavailable upon resume" warning Signed-off-by: John W. 
Linville --- net/mac80211/util.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/mac80211/util.c b/net/mac80211/util.c index c453226f06b2..e6b1fba980c6 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1097,9 +1097,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) */ res = drv_start(local); if (res) { - WARN(local->suspended, "Harware became unavailable " - "upon resume. This is could be a software issue" - "prior to suspend or a hardware issue\n"); + WARN(local->suspended, "Hardware became unavailable " + "upon resume. This could be a software issue " + "prior to suspend or a hardware issue.\n"); return res; } -- cgit v1.2.3 From 533866b12cce484994163b1e201778cbac4c04c5 Mon Sep 17 00:00:00 2001 From: "Porsch, Marco" Date: Wed, 24 Feb 2010 09:53:13 +0100 Subject: mac80211: fix PREQ processing and one small bug 1st) a PREQ should only be processed, if it has the same SN and better metric (instead of better or equal). 2nd) next_hop[ETH_ALEN] now actually used to buffer mpath->next_hop->sta.addr for use out of lock. Signed-off-by: Marco Porsch Acked-by: Javier Cardona Cc: stable@kernel.org Signed-off-by: John W. Linville --- net/mac80211/mesh_hwmp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index ce84237ebad3..ccff6133e19a 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -391,7 +391,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, if (SN_GT(mpath->sn, orig_sn) || (mpath->sn == orig_sn && action == MPATH_PREQ && - new_metric > mpath->metric)) { + new_metric >= mpath->metric)) { process = false; fresh_info = false; } @@ -611,7 +611,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, cpu_to_le32(orig_sn), 0, target_addr, - cpu_to_le32(target_sn), mpath->next_hop->sta.addr, hopcount, + cpu_to_le32(target_sn), next_hop, hopcount, ttl, cpu_to_le32(lifetime), cpu_to_le32(metric), 0, sdata); rcu_read_unlock(); -- cgit v1.2.3 From eb3d72c8b7e6bb6a55e15272c52eb4eadf7fb1f1 Mon Sep 17 00:00:00 2001 From: Ben Konrath Date: Thu, 18 Mar 2010 19:06:57 -0400 Subject: ar9170: add support for NEC WL300NU-G USB dongle This patch adds support for the NEC WL300NU-G USB wifi dongle. Signed-off-by: Ben Konrath Signed-off-by: John W. Linville --- drivers/net/wireless/ath/ar9170/usb.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c index 4e30197afff6..c18575070061 100644 --- a/drivers/net/wireless/ath/ar9170/usb.c +++ b/drivers/net/wireless/ath/ar9170/usb.c @@ -94,6 +94,8 @@ static struct usb_device_id ar9170_usb_ids[] = { { USB_DEVICE(0x04bb, 0x093f) }, /* AVM FRITZ!WLAN USB Stick N */ { USB_DEVICE(0x057C, 0x8401) }, + /* NEC WL300NU-G */ + { USB_DEVICE(0x0409, 0x0249) }, /* AVM FRITZ!WLAN USB Stick N 2.4 */ { USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY }, -- cgit v1.2.3 From e5868ba10c3c5d8a56c06bbafe098103356ac03f Mon Sep 17 00:00:00 2001 From: Benjamin Larsson Date: Fri, 19 Mar 2010 01:46:10 +0100 Subject: Add a pci-id to the mwl8k driver Signed-off-by: Benjamin Larsson Signed-off-by: John W. 
Linville --- drivers/net/wireless/mwl8k.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index ac65e13eb0de..4e58ebe15580 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -3851,6 +3851,7 @@ MODULE_FIRMWARE("mwl8k/helper_8366.fw"); MODULE_FIRMWARE("mwl8k/fmimage_8366.fw"); static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = { + { PCI_VDEVICE(MARVELL, 0x2a0a), .driver_data = MWL8363, }, { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, }, { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, }, { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, }, -- cgit v1.2.3 From 05a9a1617026977422c7c5ed3aeac6f46fa2132c Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 17 Mar 2010 14:37:16 +0100 Subject: Add USB ID for Thomson SpeedTouch 120g to p54usb id table Thanks to Chris Chabot for giving his old wireless usb dongle to me to test it under Linux. Signed-off-by: Hans de Goede Signed-off-by: John W. Linville --- drivers/net/wireless/p54/p54usb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index b3c4fbd80d8d..e3cfc001d2fd 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -35,6 +35,7 @@ MODULE_FIRMWARE("isl3887usb"); static struct usb_device_id p54u_table[] __devinitdata = { /* Version 1 devices (pci chip + net2280) */ {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ + {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */ {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */ {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */ -- cgit v1.2.3 From 7236fe29fd72d17074574ba312e7f1bb9d10abaa Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 22 Mar 2010 13:42:43 -0700 Subject: mac80211: move netdev queue enabling to correct spot "mac80211: fix skb buffering issue" still left a race between enabling the hardware queues and the virtual interface queues. In hindsight it's totally obvious that enabling the netdev queues for a hardware queue when the hardware queue is enabled is wrong, because it could well possible that we can fill the hw queue with packets we already have pending. Thus, we must only enable the netdev queues once all the pending packets have been processed and sent off to the device. In testing, I haven't been able to trigger this race condition, but it's clearly there, possibly only when aggregation is being enabled. Signed-off-by: Johannes Berg Cc: stable@kernel.org Signed-off-by: John W. 
Linville --- net/mac80211/tx.c | 6 ++++++ net/mac80211/util.c | 12 ++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index cbe53ed4fb0b..cfc473e1b050 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1991,6 +1991,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, void ieee80211_tx_pending(unsigned long data) { struct ieee80211_local *local = (struct ieee80211_local *)data; + struct ieee80211_sub_if_data *sdata; unsigned long flags; int i; bool txok; @@ -2029,6 +2030,11 @@ void ieee80211_tx_pending(unsigned long data) if (!txok) break; } + + if (skb_queue_empty(&local->pending[i])) + list_for_each_entry_rcu(sdata, &local->interfaces, list) + netif_tx_wake_queue( + netdev_get_tx_queue(sdata->dev, i)); } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index e6b1fba980c6..53af57047435 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -279,13 +279,13 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, /* someone still has this queue stopped */ return; - if (!skb_queue_empty(&local->pending[queue])) + if (skb_queue_empty(&local->pending[queue])) { + rcu_read_lock(); + list_for_each_entry_rcu(sdata, &local->interfaces, list) + netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); + rcu_read_unlock(); + } else tasklet_schedule(&local->tx_pending_tasklet); - - rcu_read_lock(); - list_for_each_entry_rcu(sdata, &local->interfaces, list) - netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); - rcu_read_unlock(); } void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, -- cgit v1.2.3 From f6c8f1523a2de3b84340e45913cbcee8bee74570 Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Fri, 12 Mar 2010 11:13:26 -0800 Subject: iwlwifi: fix regulatory Commit "cfg80211: convert bools into flags" mistakenly modified iwlwifi's regulatory settings instead of just converting it. Fix this. 
This fixes http://bugzilla.intellinuxwireless.org/show_bug.cgi?id=2172 Signed-off-by: Reinette Chatre CC: stable@kernel.org --- drivers/net/wireless/iwlwifi/iwl-agn.c | 2 +- drivers/net/wireless/iwlwifi/iwl3945-base.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 818367b57bab..c8e2e3a42b4b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -2644,7 +2644,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv) BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); - hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY | + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS; /* diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 54daa38ecba3..f93013e9b17d 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -3921,7 +3921,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv) BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); - hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY | + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS; hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; -- cgit v1.2.3 From be6b38bcb175613f239e0b302607db346472c6b6 Mon Sep 17 00:00:00 2001 From: Wey-Yi Guy Date: Thu, 18 Mar 2010 09:05:00 -0700 Subject: iwlwifi: counting number of tfds can be free for 4965 Forget one hunk in 4965 during "iwlwifi: error checking for number of tfds in queue" patch. Reported-by: Shanyu Zhao Signed-off-by: Wey-Yi Guy Signed-off-by: Reinette Chatre CC: stable@kernel.org --- drivers/net/wireless/iwlwifi/iwl-4965.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 1bd2cd836026..83c52a682622 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c @@ -2041,16 +2041,14 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, tx_resp->failure_frame); freed = iwl_tx_queue_reclaim(priv, txq_id, index); - if (qc && likely(sta_id != IWL_INVALID_STATION)) - priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; + iwl_free_tfds_in_queue(priv, sta_id, tid, freed); if (priv->mac80211_registered && (iwl_queue_space(&txq->q) > txq->q.low_mark)) iwl_wake_queue(priv, txq_id); } - if (qc && likely(sta_id != IWL_INVALID_STATION)) - iwl_txq_check_empty(priv, sta_id, tid, txq_id); + iwl_txq_check_empty(priv, sta_id, tid, txq_id); if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n"); -- cgit v1.2.3 From 48a6be6a0dd3982bb2d48e82b3e6f5458d9f3c63 Mon Sep 17 00:00:00 2001 From: Shanyu Zhao Date: Tue, 16 Mar 2010 10:22:26 -0700 Subject: iwlwifi: clear unattended interrupts in tasklet Previously in interrupt handling tasklet, iwlwifi driver only clear/ack those interrupts that are enabled by the driver through inta_mask. If the hardware generates unattended interrupts, driver will not ack them, defeating the interrupt coalescing feature. This results in high number of interrupts per second and high CPU utilization. This patch addresses this issue by acking those unattended interrupts in the tasklet. Local test showed an order of magnitude improvement in terms of the number of interrupts without sacrificing networking throughput. This is a workaround for hardware issue. 
Signed-off-by: Shanyu Zhao Signed-off-by: Zhu Yi Signed-off-by: Reinette Chatre --- drivers/net/wireless/iwlwifi/iwl-agn.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index c8e2e3a42b4b..e4c2e1e448ad 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -1258,7 +1258,15 @@ static void iwl_irq_tasklet(struct iwl_priv *priv) /* Ack/clear/reset pending uCode interrupts. * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, */ - iwl_write32(priv, CSR_INT, priv->inta); + /* There is a hardware bug in the interrupt mask function that some + * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if + * they are disabled in the CSR_INT_MASK register. Furthermore the + * ICT interrupt handling mechanism has another bug that might cause + * these unmasked interrupts fail to be detected. We workaround the + * hardware bugs here by ACKing all the possible interrupts so that + * interrupt coalescing can still be achieved. + */ + iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask); inta = priv->inta; -- cgit v1.2.3 From 71976907842abb71a0e6fda081e1d16e00420849 Mon Sep 17 00:00:00 2001 From: Gertjan van Wingerde Date: Wed, 24 Mar 2010 21:42:36 +0100 Subject: rt2x00: Fix typo in RF register programming of rt2800. Signed-off-by: Gertjan van Wingerde Acked-by: Ivo van Doorn Signed-off-by: John W. Linville --- drivers/net/wireless/rt2x00/rt2800lib.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 18d4d8e4ae6b..326fce78489d 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -812,9 +812,9 @@ static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev, rt2800_rfcsr_write(rt2x00dev, 24, rt2x00dev->calibration[conf_is_ht40(conf)]); - rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr); + rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1); - rt2800_rfcsr_write(rt2x00dev, 23, rfcsr); + rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); } static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, -- cgit v1.2.3 From 9e76ad2a27f592c1390248867391880c7efe78b3 Mon Sep 17 00:00:00 2001 From: Gertjan van Wingerde Date: Wed, 24 Mar 2010 21:42:37 +0100 Subject: rt2x00: Disable powersaving by default in rt2500usb. Recent bug reports have shown that rt2500usb also suffers from the powersave problems that the PCI rt2x00 drivers suffer from. So disable powersaving by default for rt2500usb as well. Signed-off-by: Gertjan van Wingerde Acked-by: Ivo van Doorn Signed-off-by: John W. Linville --- drivers/net/wireless/rt2x00/rt2500usb.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index ee34c137e7cd..dbaa78138437 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -1642,6 +1642,11 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) char *tx_power; unsigned int i; + /* + * Disable powersaving as default. + */ + rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + /* * Initialize all hw fields. 
*/ -- cgit v1.2.3 From 2d20c72c021d96f8b9230396c8e3782f204214ec Mon Sep 17 00:00:00 2001 From: Valentin Longchamp Date: Fri, 26 Mar 2010 11:44:33 +0100 Subject: setup correct int pipe type in ar9170_usb_exec_cmd An int urb is constructed but we fill it in with a bulk pipe type. Commit f661c6f8c67bd55e93348f160d590ff9edf08904 implemented a pipe type check when CONFIG_USB_DEBUG is enabled. The check failed for all the ar9170 usb transfers and the driver could not configure the wifi dongle. This went unnoticed until now because most people don't have CONFIG_USB_DEBUG enabled. Signed-off-by: Valentin Longchamp Cc: Stable Acked-by: Christian Lamparter Signed-off-by: John W. Linville --- drivers/net/wireless/ath/ar9170/usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c index c18575070061..6b1cb706e410 100644 --- a/drivers/net/wireless/ath/ar9170/usb.c +++ b/drivers/net/wireless/ath/ar9170/usb.c @@ -418,7 +418,7 @@ static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd, spin_unlock_irqrestore(&aru->common.cmdlock, flags); usb_fill_int_urb(urb, aru->udev, - usb_sndbulkpipe(aru->udev, AR9170_EP_CMD), + usb_sndintpipe(aru->udev, AR9170_EP_CMD), aru->common.cmdbuf, plen + 4, ar9170_usb_tx_urb_complete, NULL, 1); -- cgit v1.2.3 From 8e1a53c615e8efe0fac670f2973da64758748a8a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sun, 28 Mar 2010 14:55:00 +0300 Subject: iwlwifi: range checking issue IWL_RATE_COUNT is 13 and IWL_RATE_COUNT_LEGACY is 12. IWL_RATE_COUNT_LEGACY is the right one here because iwl3945_rates doesn't support 60M and also that's how "rates" is defined in iwlcore_init_geos() from drivers/net/wireless/iwlwifi/iwl-core.c. rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY), GFP_KERNEL); Signed-off-by: Dan Carpenter Cc: stable@kernel.org Acked-by: Zhu Yi Signed-off-by: John W. Linville --- drivers/net/wireless/iwlwifi/iwl3945-base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index f93013e9b17d..e276f2a4e835 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -1955,7 +1955,7 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv, { int i; - for (i = 0; i < IWL_RATE_COUNT; i++) { + for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { rates[i].bitrate = iwl3945_rates[i].ieee * 5; rates[i].hw_value = i; /* Rate scaling will work on indexes */ rates[i].hw_value_short = i; -- cgit v1.2.3 From 7371400431389e1df6a2a05ab9882055b8a6ff2c Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Mon, 29 Mar 2010 17:14:18 +0200 Subject: net/wireless/libertas: do not call wiphy_unregister() w/o wiphy_register() The libertas driver calls wiphy_unregister() without a prior wiphy_register() when a devices fails initialization. Fix this by introducing a private flag. [ 9.310000] Unable to handle kernel NULL pointer dereference at virtual address 00000000 [...] 
[ 9.330000] [] (wiphy_unregister+0xfc/0x19c) from [] (lbs_cfg_free+0x70/0x9c [libertas]) [ 9.330000] [] (lbs_cfg_free+0x70/0x9c [libertas]) from [] (lbs_remove_card+0x180/0x210 [libertas]) [ 9.330000] [] (lbs_remove_card+0x180/0x210 [libertas]) from [] (if_sdio_probe+0xdc4/0xef4 [libertas_sdio]) [ 9.330000] [] (if_sdio_probe+0xdc4/0xef4 [libertas_sdio]) from [] (sdio_bus_probe+0xd4/0xf0) [ 9.330000] [] (sdio_bus_probe+0xd4/0xf0) from [] (driver_probe_device+0xa4/0x174) [ 9.330000] [] (driver_probe_device+0xa4/0x174) from [] (__driver_attach+0x60/0x84) [ 9.330000] [] (__driver_attach+0x60/0x84) from [] (bus_for_each_dev+0x4c/0x8c) [ 9.330000] [] (bus_for_each_dev+0x4c/0x8c) from [] (bus_add_driver+0xa0/0x228) [ 9.330000] [] (bus_add_driver+0xa0/0x228) from [] (driver_register+0xc0/0x150) [ 9.330000] [] (driver_register+0xc0/0x150) from [] (if_sdio_init_module+0x6c/0x108 [libertas_sdio]) [ 9.330000] [] (if_sdio_init_module+0x6c/0x108 [libertas_sdio]) from [] (do_one_initcall+0x5c/0x1bc) [ 9.330000] [] (do_one_initcall+0x5c/0x1bc) from [] (sys_init_module+0xc0/0x1f0) [ 9.330000] [] (sys_init_module+0xc0/0x1f0) from [] (ret_fast_syscall+0x0/0x30) Signed-off-by: Daniel Mack Cc: Dan Williams Cc: John W. Linville Cc: Holger Schurig Cc: Bing Zhao Cc: libertas-dev@lists.infradead.org Cc: linux-wireless@vger.kernel.org Cc: netdev@vger.kernel.org Signed-off-by: John W. Linville --- drivers/net/wireless/libertas/cfg.c | 8 ++++++-- drivers/net/wireless/libertas/dev.h | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 4396dccd12ac..82ebe1461a77 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -172,6 +172,8 @@ int lbs_cfg_register(struct lbs_private *priv) if (ret < 0) lbs_pr_err("cannot register wiphy device\n"); + priv->wiphy_registered = true; + ret = register_netdev(priv->dev); if (ret) lbs_pr_err("cannot register network device\n"); @@ -190,9 +192,11 @@ void lbs_cfg_free(struct lbs_private *priv) if (!wdev) return; - if (wdev->wiphy) { + if (priv->wiphy_registered) wiphy_unregister(wdev->wiphy); + + if (wdev->wiphy) wiphy_free(wdev->wiphy); - } + kfree(wdev); } diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h index 6977ee820214..6875e1498bd5 100644 --- a/drivers/net/wireless/libertas/dev.h +++ b/drivers/net/wireless/libertas/dev.h @@ -36,6 +36,7 @@ struct lbs_private { /* CFG80211 */ struct wireless_dev *wdev; + bool wiphy_registered; /* Mesh */ struct net_device *mesh_dev; /* Virtual device */ -- cgit v1.2.3 From baff42ab1494528907bf4d5870359e31711746ae Mon Sep 17 00:00:00 2001 From: "Steven J. Magnani" Date: Tue, 30 Mar 2010 13:56:01 -0700 Subject: net: Fix oops from tcp_collapse() when using splice() tcp_read_sock() can have a eat skbs without immediately advancing copied_seq. This can cause a panic in tcp_collapse() if it is called as a result of the recv_actor dropping the socket lock. A userspace program that splices data from a socket to either another socket or to a file can trigger this bug. Signed-off-by: Steven J. Magnani Signed-off-by: David S. 
Miller --- net/ipv4/tcp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 6afb6d8662b2..2c75f891914e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1368,6 +1368,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, sk_eat_skb(sk, skb, 0); if (!desc->count) break; + tp->copied_seq = seq; } tp->copied_seq = seq; -- cgit v1.2.3 From 7c0d10d35f7f47d00cc5f2b85ee5e95c2b1fdb7e Mon Sep 17 00:00:00 2001 From: Andy Fleming Date: Mon, 29 Mar 2010 15:42:23 +0000 Subject: gianfar: Fix a memory leak in gianfar close code gianfar needed to ensure existence of the *skbuff arrays before freeing the skbs in them, rather than ensuring their nonexistence. Signed-off-by: Andy Fleming Signed-off-by: David S. Miller --- drivers/net/gianfar.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 669de028d44f..c98fead8412b 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -1638,13 +1638,13 @@ static void free_skb_resources(struct gfar_private *priv) /* Go through all the buffer descriptors and free their data buffers */ for (i = 0; i < priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; - if(!tx_queue->tx_skbuff) + if(tx_queue->tx_skbuff) free_skb_tx_queue(tx_queue); } for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - if(!rx_queue->rx_skbuff) + if(rx_queue->rx_skbuff) free_skb_rx_queue(rx_queue); } -- cgit v1.2.3 From ed130589d9afa3238c94b9537f2024355b9638e1 Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Tue, 30 Mar 2010 11:54:21 +0000 Subject: net: gianfar - initialize per-queue statistics Interfaces come up claiming having already received 3.0 GiB. Use kzalloc to properly initialize per-queue data. Signed-off-by: Kim Phillips Signed-off-by: David S. Miller --- drivers/net/gianfar.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index c98fead8412b..8ea78353d1e2 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -676,7 +676,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) priv->rx_queue[i] = NULL; for (i = 0; i < priv->num_tx_queues; i++) { - priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc( + priv->tx_queue[i] = (struct gfar_priv_tx_q *)kzalloc( sizeof (struct gfar_priv_tx_q), GFP_KERNEL); if (!priv->tx_queue[i]) { err = -ENOMEM; @@ -689,7 +689,7 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev) } for (i = 0; i < priv->num_rx_queues; i++) { - priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc( + priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc( sizeof (struct gfar_priv_rx_q), GFP_KERNEL); if (!priv->rx_queue[i]) { err = -ENOMEM; -- cgit v1.2.3 From ddc01b3b8ab224b346daf61976078b166f36b7e8 Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Tue, 30 Mar 2010 11:54:22 +0000 Subject: net: gianfar - align BD ring size console messages fix this: eth2: :RX BD ring size for Q[0]: 256 eth2:TX BD ring size for Q[0]: 256 to look like: eth2: RX BD ring size for Q[0]: 256 eth2: TX BD ring size for Q[0]: 256 Signed-off-by: Kim Phillips Signed-off-by: David S. Miller --- drivers/net/gianfar.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 8ea78353d1e2..080d1cea5b26 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -1120,10 +1120,10 @@ static int gfar_probe(struct of_device *ofdev, /* provided which set of benchmarks. 
*/ printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); for (i = 0; i < priv->num_rx_queues; i++) - printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n", + printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n", dev->name, i, priv->rx_queue[i]->rx_ring_size); for(i = 0; i < priv->num_tx_queues; i++) - printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n", + printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n", dev->name, i, priv->tx_queue[i]->tx_ring_size); return 0; -- cgit v1.2.3 From ce6fbdefb68d46db88170494b277551f955b48e2 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 29 Mar 2010 05:35:05 +0000 Subject: drivers/net: Add missing unlock Unlock the lock before leaving the function. A simplified version of the semantic patch that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @r exists@ expression E1; identifier f; @@ f (...) { <+... * spin_lock_irqsave (E1,...); ... when != E1 * return ...; ...+> } // Signed-off-by: Julia Lawall Signed-off-by: David S. Miller --- drivers/net/sgiseeq.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index ed999d31f1fa..37f6a00cde17 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c @@ -592,8 +592,10 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Setup... */ len = skb->len; if (len < ETH_ZLEN) { - if (skb_padto(skb, ETH_ZLEN)) + if (skb_padto(skb, ETH_ZLEN)) { + spin_unlock_irqrestore(&sp->tx_lock, flags); return NETDEV_TX_OK; + } len = ETH_ZLEN; } -- cgit v1.2.3 From 00ae702847df5566ce9182e9c895185e2ad1c181 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 30 Mar 2010 23:08:37 +0000 Subject: bonding: bond_xmit_roundrobin() fix Commit a2fd940f (bonding: fix broken multicast with round-robin mode) added a problem on litle endian machines. drivers/net/bonding/bond_main.c:4159: warning: comparison is always false due to limited range of data type Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5b92fbff431d..5972a52a330c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4156,7 +4156,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev * send the join/membership reports. The curr_active_slave found * will send all of this type of traffic. */ - if ((iph->protocol == htons(IPPROTO_IGMP)) && + if ((iph->protocol == IPPROTO_IGMP) && (skb->protocol == htons(ETH_P_IP))) { read_lock(&bond->curr_slave_lock); -- cgit v1.2.3 From f510fc64cce4646a1fd3c7e5ba7e36cad7e98f02 Mon Sep 17 00:00:00 2001 From: Ajit Khaparde Date: Wed, 31 Mar 2010 01:47:45 +0000 Subject: be2net: fix a bug in flashing the redboot section Signed-off-by: Ajit Khaparde Signed-off-by: David S. 
Miller --- drivers/net/benet/be_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 43e8032f9236..a08faf3c8e1c 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -1855,7 +1855,7 @@ static bool be_flash_redboot(struct be_adapter *adapter, p += crc_offset; status = be_cmd_get_flash_crc(adapter, flashed_crc, - (img_start + image_size - 4)); + (image_size - 4)); if (status) { dev_err(&adapter->pdev->dev, "could not get crc from flash, not flashing redboot\n"); -- cgit v1.2.3 From 8b93b710a9cd70d67013b4b0f00df7dfda058064 Mon Sep 17 00:00:00 2001 From: Ajit Khaparde Date: Wed, 31 Mar 2010 01:57:10 +0000 Subject: be2net: fix flashing on big endian architectures Flashing is broken on big endian architectures like ppc. This patch fixes it. From: Naresh G Signed-off-by: Ajit Khaparde Signed-off-by: David S. Miller --- drivers/net/benet/be_cmds.c | 4 ++-- drivers/net/benet/be_main.c | 15 +++++++-------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 50e6259b50e4..d0ef4ac987cd 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -1464,8 +1464,8 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT); req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); - req->params.offset = offset; - req->params.data_buf_size = 0x4; + req->params.offset = cpu_to_le32(offset); + req->params.data_buf_size = cpu_to_le32(0x4); status = be_mcc_notify_wait(adapter); if (!status) diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index a08faf3c8e1c..b0faaa204c7c 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -1991,7 +1991,7 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) struct flash_file_hdr_g3 *fhdr3; struct image_hdr *img_hdr_ptr = NULL; struct be_dma_mem flash_cmd; - int status, i = 0; + int status, i = 0, num_imgs = 0; const u8 *p; strcpy(fw_file, func); @@ -2017,15 +2017,14 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) if ((adapter->generation == BE_GEN3) && (get_ufigen_type(fhdr) == BE_GEN3)) { fhdr3 = (struct flash_file_hdr_g3 *) fw->data; - for (i = 0; i < fhdr3->num_imgs; i++) { + num_imgs = le32_to_cpu(fhdr3->num_imgs); + for (i = 0; i < num_imgs; i++) { img_hdr_ptr = (struct image_hdr *) (fw->data + (sizeof(struct flash_file_hdr_g3) + - i * sizeof(struct image_hdr))); - if (img_hdr_ptr->imageid == 1) { - status = be_flash_data(adapter, fw, - &flash_cmd, fhdr3->num_imgs); - } - + i * sizeof(struct image_hdr))); + if (le32_to_cpu(img_hdr_ptr->imageid) == 1) + status = be_flash_data(adapter, fw, &flash_cmd, + num_imgs); } } else if ((adapter->generation == BE_GEN2) && (get_ufigen_type(fhdr) == BE_GEN2)) { -- cgit v1.2.3 From 9cae9e4f8b5887d8ef46fc56c7ca97814ae741ce Mon Sep 17 00:00:00 2001 From: Ajit Khaparde Date: Wed, 31 Mar 2010 02:00:32 +0000 Subject: be2net: fix bug in vlan rx path for big endian architecture vlan traffic on big endian architecture is broken. Need to swap the vid before giving packet to stack. This patch fixes it. Signed-off-by: Ajit Khaparde Signed-off-by: David S. 
Miller --- drivers/net/benet/be_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index b0faaa204c7c..ec6ace802256 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -807,7 +807,7 @@ static void be_rx_compl_process(struct be_adapter *adapter, return; } vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); - vid = be16_to_cpu(vid); + vid = swab16(vid); vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); } else { netif_receive_skb(skb); @@ -884,7 +884,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, napi_gro_frags(&eq_obj->napi); } else { vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); - vid = be16_to_cpu(vid); + vid = swab16(vid); if (!adapter->vlan_grp || adapter->vlans_added == 0) return; -- cgit v1.2.3 From 7ba8a9b4f92e9559933af305c9b11e9beb97f9ea Mon Sep 17 00:00:00 2001 From: Carmelo AMOROSO Date: Wed, 31 Mar 2010 21:44:03 +0000 Subject: stmmac: fix kconfig for crc32 build error stmmac uses crc32 functions so it needs to select CRC32. Fixes build error: drivers/built-in.o: In function `dwmac1000_set_filter': dwmac1000_core.c:(.text+0x3c380): undefined reference to `crc32_le' dwmac1000_core.c:(.text+0x3c384): undefined reference to `bitrev32' Signed-off-by: Carmelo Amoroso Signed-off-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/stmmac/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig index fb287649a305..eb63d44748a7 100644 --- a/drivers/net/stmmac/Kconfig +++ b/drivers/net/stmmac/Kconfig @@ -2,6 +2,7 @@ config STMMAC_ETH tristate "STMicroelectronics 10/100/1000 Ethernet driver" select MII select PHYLIB + select CRC32 depends on NETDEVICES && CPU_SUBTYPE_ST40 help This is the driver for the Ethernet IPs are built around a -- cgit v1.2.3 From a1d6f3f65512cc90a636e6ec653b7bc9e2238753 Mon Sep 17 00:00:00 2001 From: Giuseppe CAVALLARO Date: Wed, 31 Mar 2010 21:44:04 +0000 Subject: stmmac: add documentation for the driver. Add Documentation/networking/stmmac.txt for the stmmac network driver. Signed-off-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- Documentation/networking/stmmac.txt | 143 ++++++++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) create mode 100644 Documentation/networking/stmmac.txt diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt new file mode 100644 index 000000000000..7ee770b5ef5f --- /dev/null +++ b/Documentation/networking/stmmac.txt @@ -0,0 +1,143 @@ + STMicroelectronics 10/100/1000 Synopsys Ethernet driver + +Copyright (C) 2007-2010 STMicroelectronics Ltd +Author: Giuseppe Cavallaro + +This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers +(Synopsys IP blocks); it has been fully tested on STLinux platforms. + +Currently this network device driver is for all STM embedded MAC/GMAC +(7xxx SoCs). + +DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100 +Universal version 4.0 have been used for developing the first code +implementation. 
+ +Please, for more information also visit: www.stlinux.com + +1) Kernel Configuration +The kernel configuration option is STMMAC_ETH: + Device Drivers ---> Network device support ---> Ethernet (1000 Mbit) ---> + STMicroelectronics 10/100/1000 Ethernet driver (STMMAC_ETH) + +2) Driver parameters list: + debug: message level (0: no output, 16: all); + phyaddr: to manually provide the physical address to the PHY device; + dma_rxsize: DMA rx ring size; + dma_txsize: DMA tx ring size; + buf_sz: DMA buffer size; + tc: control the HW FIFO threshold; + tx_coe: Enable/Disable Tx Checksum Offload engine; + watchdog: transmit timeout (in milliseconds); + flow_ctrl: Flow control ability [on/off]; + pause: Flow Control Pause Time; + tmrate: timer period (only if timer optimisation is configured). + +3) Command line options +Driver parameters can be also passed in command line by using: + stmmaceth=dma_rxsize:128,dma_txsize:512 + +4) Driver information and notes + +4.1) Transmit process +The xmit method is invoked when the kernel needs to transmit a packet; it sets +the descriptors in the ring and informs the DMA engine that there is a packet +ready to be transmitted. +Once the controller has finished transmitting the packet, an interrupt is +triggered; So the driver will be able to release the socket buffers. +By default, the driver sets the NETIF_F_SG bit in the features field of the +net_device structure enabling the scatter/gather feature. + +4.2) Receive process +When one or more packets are received, an interrupt happens. The interrupts +are not queued so the driver has to scan all the descriptors in the ring during +the receive process. +This is based on NAPI so the interrupt handler signals only if there is work to be +done, and it exits. +Then the poll method will be scheduled at some future point. +The incoming packets are stored, by the DMA, in a list of pre-allocated socket +buffers in order to avoid the memcpy (Zero-copy). + +4.3) Timer-Driver Interrupt +Instead of having the device that asynchronously notifies the frame receptions, the +driver configures a timer to generate an interrupt at regular intervals. +Based on the granularity of the timer, the frames that are received by the device +will experience different levels of latency. Some NICs have dedicated timer +device to perform this task. STMMAC can use either the RTC device or the TMU +channel 2 on STLinux platforms. +The timers frequency can be passed to the driver as parameter; when change it, +take care of both hardware capability and network stability/performance impact. +Several performance tests on STM platforms showed this optimisation allows to spare +the CPU while having the maximum throughput. + +4.4) WOL +Wake up on Lan feature through Magic Frame is only supported for the GMAC +core. + +4.5) DMA descriptors +Driver handles both normal and enhanced descriptors. The latter has been only +tested on DWC Ether MAC 10/100/1000 Universal version 3.41a. + +4.6) Ethtool support +Ethtool is supported. Driver statistics and internal errors can be taken using: +ethtool -S ethX command. It is possible to dump registers etc. + +4.7) Jumbo and Segmentation Offloading +Jumbo frames are supported and tested for the GMAC. +The GSO has been also added but it's performed in software. +LRO is not supported. + +4.8) Physical +The driver is compatible with PAL to work with PHY and GPHY devices. + +4.9) Platform information +Several information came from the platform; please refer to the +driver's Header file in include/linux directory. 
+ +struct plat_stmmacenet_data { + int bus_id; + int pbl; + int has_gmac; + void (*fix_mac_speed)(void *priv, unsigned int speed); + void (*bus_setup)(unsigned long ioaddr); +#ifdef CONFIG_STM_DRIVERS + struct stm_pad_config *pad_config; +#endif + void *bsp_priv; +}; + +Where: +- pbl (Programmable Burst Length) is maximum number of + beats to be transferred in one DMA transaction. + GMAC also enables the 4xPBL by default. +- fix_mac_speed and bus_setup are used to configure internal target + registers (on STM platforms); +- has_gmac: GMAC core is on board (get it at run-time in the next step); +- bus_id: bus identifier. + +struct plat_stmmacphy_data { + int bus_id; + int phy_addr; + unsigned int phy_mask; + int interface; + int (*phy_reset)(void *priv); + void *priv; +}; + +Where: +- bus_id: bus identifier; +- phy_addr: physical address used for the attached phy device; + set it to -1 to get it at run-time; +- interface: physical MII interface mode; +- phy_reset: hook to reset HW function. + +TODO: +- Continue to make the driver more generic and suitable for other Synopsys + Ethernet controllers used on other architectures (i.e. ARM). +- 10G controllers are not supported. +- MAC uses Normal descriptors and GMAC uses enhanced ones. + This is a limit that should be reviewed. MAC could want to + use the enhanced structure. +- Checksumming: Rx/Tx csum is done in HW in case of GMAC only. +- Review the timer optimisation code to use an embedded device that seems to be + available in new chip generations. -- cgit v1.2.3 From 6503d96168f891ffa3b70ae6c9698a1a722025a0 Mon Sep 17 00:00:00 2001 From: Changli Gao Date: Wed, 31 Mar 2010 22:58:26 +0000 Subject: net: check the length of the socket address passed to connect(2) check the length of the socket address passed to connect(2). Check the length of the socket address passed to connect(2). If the length is invalid, -EINVAL will be returned. Signed-off-by: Changli Gao ---- net/bluetooth/l2cap.c | 3 ++- net/bluetooth/rfcomm/sock.c | 3 ++- net/bluetooth/sco.c | 3 ++- net/can/bcm.c | 3 +++ net/ieee802154/af_ieee802154.c | 3 +++ net/ipv4/af_inet.c | 5 +++++ net/netlink/af_netlink.c | 3 +++ 7 files changed, 20 insertions(+), 3 deletions(-) Signed-off-by: David S. 
Miller --- net/bluetooth/l2cap.c | 3 ++- net/bluetooth/rfcomm/sock.c | 3 ++- net/bluetooth/sco.c | 3 ++- net/can/bcm.c | 3 +++ net/ieee802154/af_ieee802154.c | 3 +++ net/ipv4/af_inet.c | 5 +++++ net/netlink/af_netlink.c | 3 +++ 7 files changed, 20 insertions(+), 3 deletions(-) diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 7794a2e2adce..99d68c34e4f1 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -1002,7 +1002,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al BT_DBG("sk %p", sk); - if (!addr || addr->sa_family != AF_BLUETOOTH) + if (!addr || alen < sizeof(addr->sa_family) || + addr->sa_family != AF_BLUETOOTH) return -EINVAL; memset(&la, 0, sizeof(la)); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 7f439765403d..8ed3c37684fa 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -397,7 +397,8 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a BT_DBG("sk %p", sk); - if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_rc)) + if (alen < sizeof(struct sockaddr_rc) || + addr->sa_family != AF_BLUETOOTH) return -EINVAL; lock_sock(sk); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index e5b16b76b22e..ca6b2ad1c3fc 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -499,7 +499,8 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen BT_DBG("sk %p", sk); - if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_sco)) + if (alen < sizeof(struct sockaddr_sco) || + addr->sa_family != AF_BLUETOOTH) return -EINVAL; if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) diff --git a/net/can/bcm.c b/net/can/bcm.c index e32af52238a2..629ad1debe81 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1478,6 +1478,9 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, struct sock *sk = sock->sk; struct bcm_sock *bo = bcm_sk(sk); + if (len < sizeof(*addr)) + return -EINVAL; + if (bo->bound) return -EISCONN; diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c index bad1c49fd960..01beb6c11205 100644 --- a/net/ieee802154/af_ieee802154.c +++ b/net/ieee802154/af_ieee802154.c @@ -126,6 +126,9 @@ static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr, { struct sock *sk = sock->sk; + if (addr_len < sizeof(uaddr->sa_family)) + return -EINVAL; + if (uaddr->sa_family == AF_UNSPEC) return sk->sk_prot->disconnect(sk, flags); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 33b7dffa7732..a366861bf4cd 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -530,6 +530,8 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, { struct sock *sk = sock->sk; + if (addr_len < sizeof(uaddr->sa_family)) + return -EINVAL; if (uaddr->sa_family == AF_UNSPEC) return sk->sk_prot->disconnect(sk, flags); @@ -573,6 +575,9 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, int err; long timeo; + if (addr_len < sizeof(uaddr->sa_family)) + return -EINVAL; + lock_sock(sk); if (uaddr->sa_family == AF_UNSPEC) { diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index acbbae1e89b5..795424396aff 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -683,6 +683,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; + if (alen < 
sizeof(addr->sa_family)) + return -EINVAL; + if (addr->sa_family == AF_UNSPEC) { sk->sk_state = NETLINK_UNCONNECTED; nlk->dst_pid = 0; -- cgit v1.2.3 From 9e2e61fbf8ad016d24e4af0afff13505f3dd2a2a Mon Sep 17 00:00:00 2001 From: Amerigo Wang Date: Wed, 31 Mar 2010 21:30:52 +0000 Subject: bonding: fix potential deadlock in bond_uninit() bond_uninit() is invoked with rtnl_lock held, when it does destroy_workqueue() which will potentially flush all works in this workqueue, if we hold rtnl_lock again in the work function, it will deadlock. So move destroy_workqueue() to destructor where rtnl_lock is not held any more, suggested by Eric. Signed-off-by: WANG Cong Cc: Jay Vosburgh Cc: "David S. Miller" Cc: Stephen Hemminger Cc: Jiri Pirko Cc: "Eric W. Biederman" Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5972a52a330c..0075514bf32f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4450,6 +4450,14 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, }; +static void bond_destructor(struct net_device *bond_dev) +{ + struct bonding *bond = netdev_priv(bond_dev); + if (bond->wq) + destroy_workqueue(bond->wq); + free_netdev(bond_dev); +} + static void bond_setup(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); @@ -4470,7 +4478,7 @@ static void bond_setup(struct net_device *bond_dev) bond_dev->ethtool_ops = &bond_ethtool_ops; bond_set_mode_ops(bond, bond->params.mode); - bond_dev->destructor = free_netdev; + bond_dev->destructor = bond_destructor; /* Initialize the device options */ bond_dev->tx_queue_len = 0; @@ -4542,9 +4550,6 @@ static void bond_uninit(struct net_device *bond_dev) bond_remove_proc_entry(bond); - if (bond->wq) - destroy_workqueue(bond->wq); - netif_addr_lock_bh(bond_dev); bond_mc_list_destroy(bond); netif_addr_unlock_bh(bond_dev); @@ -4956,8 +4961,8 @@ int bond_create(struct net *net, const char *name) bond_setup); if (!bond_dev) { pr_err("%s: eek! can't alloc netdev!\n", name); - res = -ENOMEM; - goto out; + rtnl_unlock(); + return -ENOMEM; } dev_net_set(bond_dev, net); @@ -4966,19 +4971,16 @@ int bond_create(struct net *net, const char *name) if (!name) { res = dev_alloc_name(bond_dev, "bond%d"); if (res < 0) - goto out_netdev; + goto out; } res = register_netdevice(bond_dev); - if (res < 0) - goto out_netdev; out: rtnl_unlock(); + if (res < 0) + bond_destructor(bond_dev); return res; -out_netdev: - free_netdev(bond_dev); - goto out; } static int __net_init bond_net_init(struct net *net) -- cgit v1.2.3 From b914f3a2a35812545f773645f340d7c075e5b64d Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Thu, 1 Apr 2010 10:43:57 +0000 Subject: netlabel: Fix several rcu_dereference() calls used without RCU read locks The recent changes to add RCU lock verification to rcu_dereference() calls caught out a problem with netlbl_unlhsh_hash(), see below. =================================================== [ INFO: suspicious rcu_dereference_check() usage. ] --------------------------------------------------- net/netlabel/netlabel_unlabeled.c:246 invoked rcu_dereference_check() without protection! This patch fixes this problem as well as others like it in the NetLabel code. 
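(Illustrative sketch, not part of the patch: the fix rests on rcu_dereference_check() with a lockdep expression that accepts either legitimate protection scheme -- an RCU read-side critical section for readers, or the hash-table spinlock for updaters. The names below are placeholders; the macro actually added by this patch is netlbl_domhsh_rcu_deref() in net/netlabel/netlabel_domainhash.c.)

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct example_tbl {
	u32 size;			/* always a power of 2 */
	struct list_head *tbl;
};

static DEFINE_SPINLOCK(example_lock);		/* placeholder lock name */
static struct example_tbl __rcu *example_tbl;	/* placeholder table name */

/* Accept either form of protection, so lockdep stays quiet on both paths. */
#define example_rcu_deref(p) \
	rcu_dereference_check(p, rcu_read_lock_held() || \
				 lockdep_is_held(&example_lock))

/* Safe to call from reader (rcu_read_lock) and updater (spinlock) paths. */
static u32 example_hash(u32 key)
{
	return key & (example_rcu_deref(example_tbl)->size - 1);
}
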
Also included in this patch is the identification of future work to eliminate the RCU read lock in netlbl_domhsh_add(), but in the interest of getting this patch out quickly that work will happen in another patch to be finished later. Thanks to Eric Dumazet and Paul McKenney for their help in understanding the recent RCU changes. Signed-off-by: Paul Moore Reported-by: David Howells CC: Eric Dumazet CC: Paul E. McKenney Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/netlabel/netlabel_domainhash.c | 28 ++++++++++------ net/netlabel/netlabel_unlabeled.c | 66 +++++++++++--------------------------- 2 files changed, 37 insertions(+), 57 deletions(-) diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index 0bfeaab88ef5..06ab41b6b57a 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c @@ -50,9 +50,12 @@ struct netlbl_domhsh_tbl { }; /* Domain hash table */ -/* XXX - updates should be so rare that having one spinlock for the entire - * hash table should be okay */ +/* updates should be so rare that having one spinlock for the entire hash table + * should be okay */ static DEFINE_SPINLOCK(netlbl_domhsh_lock); +#define netlbl_domhsh_rcu_deref(p) \ + rcu_dereference_check(p, rcu_read_lock_held() || \ + lockdep_is_held(&netlbl_domhsh_lock)) static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL; static struct netlbl_dom_map *netlbl_domhsh_def = NULL; @@ -106,7 +109,8 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry) * Description: * This is the hashing function for the domain hash table, it returns the * correct bucket number for the domain. The caller is responsibile for - * calling the rcu_read_[un]lock() functions. + * ensuring that the hash table is protected with either a RCU read lock or the + * hash table lock. * */ static u32 netlbl_domhsh_hash(const char *key) @@ -120,7 +124,7 @@ static u32 netlbl_domhsh_hash(const char *key) for (iter = 0, val = 0, len = strlen(key); iter < len; iter++) val = (val << 4 | (val >> (8 * sizeof(u32) - 4))) ^ key[iter]; - return val & (rcu_dereference(netlbl_domhsh)->size - 1); + return val & (netlbl_domhsh_rcu_deref(netlbl_domhsh)->size - 1); } /** @@ -130,7 +134,8 @@ static u32 netlbl_domhsh_hash(const char *key) * Description: * Searches the domain hash table and returns a pointer to the hash table * entry if found, otherwise NULL is returned. The caller is responsibile for - * the rcu hash table locks (i.e. the caller much call rcu_read_[un]lock()). + * ensuring that the hash table is protected with either a RCU read lock or the + * hash table lock. * */ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) @@ -141,7 +146,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) if (domain != NULL) { bkt = netlbl_domhsh_hash(domain); - bkt_list = &rcu_dereference(netlbl_domhsh)->tbl[bkt]; + bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt]; list_for_each_entry_rcu(iter, bkt_list, list) if (iter->valid && strcmp(iter->domain, domain) == 0) return iter; @@ -159,8 +164,8 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) * Searches the domain hash table and returns a pointer to the hash table * entry if an exact match is found, if an exact match is not present in the * hash table then the default entry is returned if valid otherwise NULL is - * returned. The caller is responsibile for the rcu hash table locks - * (i.e. the caller much call rcu_read_[un]lock()). + * returned. 
The caller is responsibile ensuring that the hash table is + * protected with either a RCU read lock or the hash table lock. * */ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) @@ -169,7 +174,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) entry = netlbl_domhsh_search(domain); if (entry == NULL) { - entry = rcu_dereference(netlbl_domhsh_def); + entry = netlbl_domhsh_rcu_deref(netlbl_domhsh_def); if (entry != NULL && !entry->valid) entry = NULL; } @@ -306,8 +311,11 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, struct netlbl_af6list *tmp6; #endif /* IPv6 */ + /* XXX - we can remove this RCU read lock as the spinlock protects the + * entire function, but before we do we need to fixup the + * netlbl_af[4,6]list RCU functions to do "the right thing" with + * respect to rcu_dereference() when only a spinlock is held. */ rcu_read_lock(); - spin_lock(&netlbl_domhsh_lock); if (entry->domain != NULL) entry_old = netlbl_domhsh_search(entry->domain); diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 852d9d7976b9..3b4fde7622a3 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -114,6 +114,9 @@ struct netlbl_unlhsh_walk_arg { /* updates should be so rare that having one spinlock for the entire * hash table should be okay */ static DEFINE_SPINLOCK(netlbl_unlhsh_lock); +#define netlbl_unlhsh_rcu_deref(p) \ + rcu_dereference_check(p, rcu_read_lock_held() || \ + lockdep_is_held(&netlbl_unlhsh_lock)) static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL; static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL; @@ -235,15 +238,13 @@ static void netlbl_unlhsh_free_iface(struct rcu_head *entry) * Description: * This is the hashing function for the unlabeled hash table, it returns the * bucket number for the given device/interface. The caller is responsible for - * calling the rcu_read_[un]lock() functions. + * ensuring that the hash table is protected with either a RCU read lock or + * the hash table lock. * */ static u32 netlbl_unlhsh_hash(int ifindex) { - /* this is taken _almost_ directly from - * security/selinux/netif.c:sel_netif_hasfn() as they do pretty much - * the same thing */ - return ifindex & (rcu_dereference(netlbl_unlhsh)->size - 1); + return ifindex & (netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->size - 1); } /** @@ -253,7 +254,8 @@ static u32 netlbl_unlhsh_hash(int ifindex) * Description: * Searches the unlabeled connection hash table and returns a pointer to the * interface entry which matches @ifindex, otherwise NULL is returned. The - * caller is responsible for calling the rcu_read_[un]lock() functions. + * caller is responsible for ensuring that the hash table is protected with + * either a RCU read lock or the hash table lock. 
* */ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) @@ -263,7 +265,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) struct netlbl_unlhsh_iface *iter; bkt = netlbl_unlhsh_hash(ifindex); - bkt_list = &rcu_dereference(netlbl_unlhsh)->tbl[bkt]; + bkt_list = &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]; list_for_each_entry_rcu(iter, bkt_list, list) if (iter->valid && iter->ifindex == ifindex) return iter; @@ -271,33 +273,6 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex) return NULL; } -/** - * netlbl_unlhsh_search_iface_def - Search for a matching interface entry - * @ifindex: the network interface - * - * Description: - * Searches the unlabeled connection hash table and returns a pointer to the - * interface entry which matches @ifindex. If an exact match can not be found - * and there is a valid default entry, the default entry is returned, otherwise - * NULL is returned. The caller is responsible for calling the - * rcu_read_[un]lock() functions. - * - */ -static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface_def(int ifindex) -{ - struct netlbl_unlhsh_iface *entry; - - entry = netlbl_unlhsh_search_iface(ifindex); - if (entry != NULL) - return entry; - - entry = rcu_dereference(netlbl_unlhsh_def); - if (entry != NULL && entry->valid) - return entry; - - return NULL; -} - /** * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table * @iface: the associated interface entry @@ -308,8 +283,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface_def(int ifindex) * Description: * Add a new address entry into the unlabeled connection hash table using the * interface entry specified by @iface. On success zero is returned, otherwise - * a negative value is returned. The caller is responsible for calling the - * rcu_read_[un]lock() functions. + * a negative value is returned. * */ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface, @@ -349,8 +323,7 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface, * Description: * Add a new address entry into the unlabeled connection hash table using the * interface entry specified by @iface. On success zero is returned, otherwise - * a negative value is returned. The caller is responsible for calling the - * rcu_read_[un]lock() functions. + * a negative value is returned. * */ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, @@ -391,8 +364,7 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, * Description: * Add a new, empty, interface entry into the unlabeled connection hash table. * On success a pointer to the new interface entry is returned, on failure NULL - * is returned. The caller is responsible for calling the rcu_read_[un]lock() - * functions. + * is returned. 
* */ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex) @@ -415,10 +387,10 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex) if (netlbl_unlhsh_search_iface(ifindex) != NULL) goto add_iface_failure; list_add_tail_rcu(&iface->list, - &rcu_dereference(netlbl_unlhsh)->tbl[bkt]); + &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]); } else { INIT_LIST_HEAD(&iface->list); - if (rcu_dereference(netlbl_unlhsh_def) != NULL) + if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL) goto add_iface_failure; rcu_assign_pointer(netlbl_unlhsh_def, iface); } @@ -548,8 +520,7 @@ unlhsh_add_return: * * Description: * Remove an IP address entry from the unlabeled connection hash table. - * Returns zero on success, negative values on failure. The caller is - * responsible for calling the rcu_read_[un]lock() functions. + * Returns zero on success, negative values on failure. * */ static int netlbl_unlhsh_remove_addr4(struct net *net, @@ -611,8 +582,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net, * * Description: * Remove an IP address entry from the unlabeled connection hash table. - * Returns zero on success, negative values on failure. The caller is - * responsible for calling the rcu_read_[un]lock() functions. + * Returns zero on success, negative values on failure. * */ static int netlbl_unlhsh_remove_addr6(struct net *net, @@ -1547,8 +1517,10 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb, struct netlbl_unlhsh_iface *iface; rcu_read_lock(); - iface = netlbl_unlhsh_search_iface_def(skb->skb_iif); + iface = netlbl_unlhsh_search_iface(skb->skb_iif); if (iface == NULL) + iface = rcu_dereference(netlbl_unlhsh_def); + if (iface == NULL || !iface->valid) goto unlabel_getattr_nolabel; switch (family) { case PF_INET: { -- cgit v1.2.3 From bbc02c7e9d343c521f17dc06e8d8d7468639d154 Mon Sep 17 00:00:00 2001 From: Dimitris Michailidis Date: Thu, 1 Apr 2010 15:28:22 +0000 Subject: cxgb4: Add register, message, and FW definitions Signed-off-by: Dimitris Michailidis Signed-off-by: David S. Miller --- drivers/net/cxgb4/t4_msg.h | 664 ++++++++++++++++++ drivers/net/cxgb4/t4_regs.h | 878 +++++++++++++++++++++++ drivers/net/cxgb4/t4fw_api.h | 1580 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 3122 insertions(+) create mode 100644 drivers/net/cxgb4/t4_msg.h create mode 100644 drivers/net/cxgb4/t4_regs.h create mode 100644 drivers/net/cxgb4/t4fw_api.h diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h new file mode 100644 index 000000000000..fdb117443144 --- /dev/null +++ b/drivers/net/cxgb4/t4_msg.h @@ -0,0 +1,664 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __T4_MSG_H +#define __T4_MSG_H + +#include + +enum { + CPL_PASS_OPEN_REQ = 0x1, + CPL_PASS_ACCEPT_RPL = 0x2, + CPL_ACT_OPEN_REQ = 0x3, + CPL_SET_TCB_FIELD = 0x5, + CPL_GET_TCB = 0x6, + CPL_CLOSE_CON_REQ = 0x8, + CPL_CLOSE_LISTSRV_REQ = 0x9, + CPL_ABORT_REQ = 0xA, + CPL_ABORT_RPL = 0xB, + CPL_RX_DATA_ACK = 0xD, + CPL_TX_PKT = 0xE, + CPL_L2T_WRITE_REQ = 0x12, + CPL_TID_RELEASE = 0x1A, + + CPL_CLOSE_LISTSRV_RPL = 0x20, + CPL_L2T_WRITE_RPL = 0x23, + CPL_PASS_OPEN_RPL = 0x24, + CPL_ACT_OPEN_RPL = 0x25, + CPL_PEER_CLOSE = 0x26, + CPL_ABORT_REQ_RSS = 0x2B, + CPL_ABORT_RPL_RSS = 0x2D, + + CPL_CLOSE_CON_RPL = 0x32, + CPL_ISCSI_HDR = 0x33, + CPL_RDMA_CQE = 0x35, + CPL_RDMA_CQE_READ_RSP = 0x36, + CPL_RDMA_CQE_ERR = 0x37, + CPL_RX_DATA = 0x39, + CPL_SET_TCB_RPL = 0x3A, + CPL_RX_PKT = 0x3B, + CPL_RX_DDP_COMPLETE = 0x3F, + + CPL_ACT_ESTABLISH = 0x40, + CPL_PASS_ESTABLISH = 0x41, + CPL_RX_DATA_DDP = 0x42, + CPL_PASS_ACCEPT_REQ = 0x44, + + CPL_RDMA_READ_REQ = 0x60, + + CPL_PASS_OPEN_REQ6 = 0x81, + CPL_ACT_OPEN_REQ6 = 0x83, + + CPL_RDMA_TERMINATE = 0xA2, + CPL_RDMA_WRITE = 0xA4, + CPL_SGE_EGR_UPDATE = 0xA5, + + CPL_TRACE_PKT = 0xB0, + + CPL_FW4_MSG = 0xC0, + CPL_FW4_PLD = 0xC1, + CPL_FW4_ACK = 0xC3, + + CPL_FW6_MSG = 0xE0, + CPL_FW6_PLD = 0xE1, + CPL_TX_PKT_LSO = 0xED, + CPL_TX_PKT_XT = 0xEE, + + NUM_CPL_CMDS +}; + +enum CPL_error { + CPL_ERR_NONE = 0, + CPL_ERR_TCAM_FULL = 3, + CPL_ERR_BAD_LENGTH = 15, + CPL_ERR_BAD_ROUTE = 18, + CPL_ERR_CONN_RESET = 20, + CPL_ERR_CONN_EXIST_SYNRECV = 21, + CPL_ERR_CONN_EXIST = 22, + CPL_ERR_ARP_MISS = 23, + CPL_ERR_BAD_SYN = 24, + CPL_ERR_CONN_TIMEDOUT = 30, + CPL_ERR_XMIT_TIMEDOUT = 31, + CPL_ERR_PERSIST_TIMEDOUT = 32, + CPL_ERR_FINWAIT2_TIMEDOUT = 33, + CPL_ERR_KEEPALIVE_TIMEDOUT = 34, + CPL_ERR_RTX_NEG_ADVICE = 35, + CPL_ERR_PERSIST_NEG_ADVICE = 36, + CPL_ERR_ABORT_FAILED = 42, + CPL_ERR_IWARP_FLM = 50, +}; + +enum { + ULP_MODE_NONE = 0, + ULP_MODE_ISCSI = 2, + ULP_MODE_RDMA = 4, + ULP_MODE_FCOE = 6, +}; + +enum { + ULP_CRC_HEADER = 1 << 0, + ULP_CRC_DATA = 1 << 1 +}; + +enum { + CPL_ABORT_SEND_RST = 0, + CPL_ABORT_NO_RST, +}; + +enum { /* TX_PKT_XT checksum types */ + TX_CSUM_TCP = 0, + TX_CSUM_UDP = 1, + TX_CSUM_CRC16 = 4, + TX_CSUM_CRC32 = 5, + TX_CSUM_CRC32C = 6, + TX_CSUM_FCOE = 7, + TX_CSUM_TCPIP = 8, + TX_CSUM_UDPIP = 9, + TX_CSUM_TCPIP6 = 10, + TX_CSUM_UDPIP6 = 11, + TX_CSUM_IP = 12, +}; + +union opcode_tid { + __be32 opcode_tid; + u8 opcode; +}; + +#define CPL_OPCODE(x) ((x) << 24) +#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid)) +#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid) +#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF) + +/* partitioning of TID fields that also carry a queue id */ +#define GET_TID_TID(x) ((x) & 0x3fff) +#define GET_TID_QID(x) (((x) >> 14) & 0x3ff) +#define TID_QID(x) ((x) << 14) + +struct 
rss_header { + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 channel:2; + u8 filter_hit:1; + u8 filter_tid:1; + u8 hash_type:2; + u8 ipv6:1; + u8 send2fw:1; +#else + u8 send2fw:1; + u8 ipv6:1; + u8 hash_type:2; + u8 filter_tid:1; + u8 filter_hit:1; + u8 channel:2; +#endif + __be16 qid; + __be32 hash_val; +}; + +struct work_request_hdr { + __be32 wr_hi; + __be32 wr_mid; + __be64 wr_lo; +}; + +#define WR_HDR struct work_request_hdr wr + +struct cpl_pass_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be64 opt0; +#define TX_CHAN(x) ((x) << 2) +#define DELACK(x) ((x) << 5) +#define ULP_MODE(x) ((x) << 8) +#define RCV_BUFSIZ(x) ((x) << 12) +#define DSCP(x) ((x) << 22) +#define SMAC_SEL(x) ((u64)(x) << 28) +#define L2T_IDX(x) ((u64)(x) << 36) +#define NAGLE(x) ((u64)(x) << 49) +#define WND_SCALE(x) ((u64)(x) << 50) +#define KEEP_ALIVE(x) ((u64)(x) << 54) +#define MSS_IDX(x) ((u64)(x) << 60) + __be64 opt1; +#define SYN_RSS_ENABLE (1 << 0) +#define SYN_RSS_QUEUE(x) ((x) << 2) +#define CONN_POLICY_ASK (1 << 22) +}; + +struct cpl_pass_open_req6 { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be64 local_ip_hi; + __be64 local_ip_lo; + __be64 peer_ip_hi; + __be64 peer_ip_lo; + __be64 opt0; + __be64 opt1; +}; + +struct cpl_pass_open_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_pass_accept_rpl { + WR_HDR; + union opcode_tid ot; + __be32 opt2; +#define RSS_QUEUE(x) ((x) << 0) +#define RSS_QUEUE_VALID (1 << 10) +#define RX_COALESCE_VALID(x) ((x) << 11) +#define RX_COALESCE(x) ((x) << 12) +#define TX_QUEUE(x) ((x) << 23) +#define RX_CHANNEL(x) ((x) << 26) +#define WND_SCALE_EN(x) ((x) << 28) +#define TSTAMPS_EN(x) ((x) << 29) +#define SACK_EN(x) ((x) << 30) + __be64 opt0; +}; + +struct cpl_act_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be64 opt0; + __be32 params; + __be32 opt2; +}; + +struct cpl_act_open_req6 { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be64 local_ip_hi; + __be64 local_ip_lo; + __be64 peer_ip_hi; + __be64 peer_ip_lo; + __be64 opt0; + __be32 params; + __be32 opt2; +}; + +struct cpl_act_open_rpl { + union opcode_tid ot; + __be32 atid_status; +#define GET_AOPEN_STATUS(x) ((x) & 0xff) +#define GET_AOPEN_ATID(x) (((x) >> 8) & 0xffffff) +}; + +struct cpl_pass_establish { + union opcode_tid ot; + __be32 rsvd; + __be32 tos_stid; +#define GET_POPEN_TID(x) ((x) & 0xffffff) +#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff) + __be16 mac_idx; + __be16 tcp_opt; +#define GET_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1) +#define GET_TCPOPT_SACK(x) (((x) >> 6) & 1) +#define GET_TCPOPT_TSTAMP(x) (((x) >> 7) & 1) +#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf) +#define GET_TCPOPT_MSS(x) (((x) >> 12) & 0xf) + __be32 snd_isn; + __be32 rcv_isn; +}; + +struct cpl_act_establish { + union opcode_tid ot; + __be32 rsvd; + __be32 tos_atid; + __be16 mac_idx; + __be16 tcp_opt; + __be32 snd_isn; + __be32 rcv_isn; +}; + +struct cpl_get_tcb { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; +#define QUEUENO(x) ((x) << 0) +#define REPLY_CHAN(x) ((x) << 14) +#define NO_REPLY(x) ((x) << 15) + __be16 cookie; +}; + +struct cpl_set_tcb_field { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; + __be16 word_cookie; +#define TCB_WORD(x) ((x) << 0) +#define TCB_COOKIE(x) ((x) << 5) + __be64 mask; + __be64 val; +}; + +struct cpl_set_tcb_rpl { + union opcode_tid ot; + 
__be16 rsvd; + u8 cookie; + u8 status; + __be64 oldval; +}; + +struct cpl_close_con_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + +struct cpl_close_con_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; + __be32 snd_nxt; + __be32 rcv_nxt; +}; + +struct cpl_close_listsvr_req { + WR_HDR; + union opcode_tid ot; + __be16 reply_ctrl; +#define LISTSVR_IPV6 (1 << 14) + __be16 rsvd; +}; + +struct cpl_close_listsvr_rpl { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_req_rss { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_req { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + u8 rsvd1; + u8 cmd; + u8 rsvd2[6]; +}; + +struct cpl_abort_rpl_rss { + union opcode_tid ot; + u8 rsvd[3]; + u8 status; +}; + +struct cpl_abort_rpl { + WR_HDR; + union opcode_tid ot; + __be32 rsvd0; + u8 rsvd1; + u8 cmd; + u8 rsvd2[6]; +}; + +struct cpl_peer_close { + union opcode_tid ot; + __be32 rcv_nxt; +}; + +struct cpl_tid_release { + WR_HDR; + union opcode_tid ot; + __be32 rsvd; +}; + +struct cpl_tx_pkt_core { + __be32 ctrl0; +#define TXPKT_VF(x) ((x) << 0) +#define TXPKT_PF(x) ((x) << 8) +#define TXPKT_VF_VLD (1 << 11) +#define TXPKT_OVLAN_IDX(x) ((x) << 12) +#define TXPKT_INTF(x) ((x) << 16) +#define TXPKT_INS_OVLAN (1 << 21) +#define TXPKT_OPCODE(x) ((x) << 24) + __be16 pack; + __be16 len; + __be64 ctrl1; +#define TXPKT_CSUM_END(x) ((x) << 12) +#define TXPKT_CSUM_START(x) ((x) << 20) +#define TXPKT_IPHDR_LEN(x) ((u64)(x) << 20) +#define TXPKT_CSUM_LOC(x) ((u64)(x) << 30) +#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34) +#define TXPKT_CSUM_TYPE(x) ((u64)(x) << 40) +#define TXPKT_VLAN(x) ((u64)(x) << 44) +#define TXPKT_VLAN_VLD (1ULL << 60) +#define TXPKT_IPCSUM_DIS (1ULL << 62) +#define TXPKT_L4CSUM_DIS (1ULL << 63) +}; + +struct cpl_tx_pkt { + WR_HDR; + struct cpl_tx_pkt_core c; +}; + +#define cpl_tx_pkt_xt cpl_tx_pkt + +struct cpl_tx_pkt_lso { + WR_HDR; + __be32 lso_ctrl; +#define LSO_TCPHDR_LEN(x) ((x) << 0) +#define LSO_IPHDR_LEN(x) ((x) << 4) +#define LSO_ETHHDR_LEN(x) ((x) << 16) +#define LSO_IPV6(x) ((x) << 20) +#define LSO_LAST_SLICE (1 << 22) +#define LSO_FIRST_SLICE (1 << 23) +#define LSO_OPCODE(x) ((x) << 24) + __be16 ipid_ofst; + __be16 mss; + __be32 seqno_offset; + __be32 len; + /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ +}; + +struct cpl_iscsi_hdr { + union opcode_tid ot; + __be16 pdu_len_ddp; +#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF) +#define ISCSI_DDP (1 << 15) + __be16 len; + __be32 seq; + __be16 urg; + u8 rsvd; + u8 status; +}; + +struct cpl_rx_data { + union opcode_tid ot; + __be16 rsvd; + __be16 len; + __be32 seq; + __be16 urg; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 dack_mode:2; + u8 psh:1; + u8 heartbeat:1; + u8 ddp_off:1; + u8 :3; +#else + u8 :3; + u8 ddp_off:1; + u8 heartbeat:1; + u8 psh:1; + u8 dack_mode:2; +#endif + u8 status; +}; + +struct cpl_rx_data_ack { + WR_HDR; + union opcode_tid ot; + __be32 credit_dack; +#define RX_CREDITS(x) ((x) << 0) +#define RX_FORCE_ACK(x) ((x) << 28) +}; + +struct cpl_rx_pkt { + u8 opcode; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 iff:4; + u8 csum_calc:1; + u8 ipmi_pkt:1; + u8 vlan_ex:1; + u8 ip_frag:1; +#else + u8 ip_frag:1; + u8 vlan_ex:1; + u8 ipmi_pkt:1; + u8 csum_calc:1; + u8 iff:4; +#endif + __be16 csum; + __be16 vlan; + __be16 len; + __be32 l2info; +#define RXF_UDP (1 << 22) +#define RXF_TCP (1 << 23) + __be16 hdr_len; + __be16 err_vec; +}; + +struct cpl_trace_pkt { + u8 opcode; + u8 intf; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 runt:4; + u8 
filter_hit:4; + u8 :6; + u8 err:1; + u8 trunc:1; +#else + u8 filter_hit:4; + u8 runt:4; + u8 trunc:1; + u8 err:1; + u8 :6; +#endif + __be16 rsvd; + __be16 len; + __be64 tstamp; +}; + +struct cpl_l2t_write_req { + WR_HDR; + union opcode_tid ot; + __be16 params; +#define L2T_W_INFO(x) ((x) << 2) +#define L2T_W_PORT(x) ((x) << 8) +#define L2T_W_NOREPLY(x) ((x) << 15) + __be16 l2t_idx; + __be16 vlan; + u8 dst_mac[6]; +}; + +struct cpl_l2t_write_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd[3]; +}; + +struct cpl_rdma_terminate { + union opcode_tid ot; + __be16 rsvd; + __be16 len; +}; + +struct cpl_sge_egr_update { + __be32 opcode_qid; +#define EGR_QID(x) ((x) & 0x1FFFF) + __be16 cidx; + __be16 pidx; +}; + +struct cpl_fw4_pld { + u8 opcode; + u8 rsvd0[3]; + u8 type; + u8 rsvd1; + __be16 len; + __be64 data; + __be64 rsvd2; +}; + +struct cpl_fw6_pld { + u8 opcode; + u8 rsvd[5]; + __be16 len; + __be64 data[4]; +}; + +struct cpl_fw4_msg { + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[2]; +}; + +struct cpl_fw4_ack { + union opcode_tid ot; + u8 credits; + u8 rsvd0[2]; + u8 seq_vld; + __be32 snd_nxt; + __be32 snd_una; + __be64 rsvd1; +}; + +struct cpl_fw6_msg { + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[4]; +}; + +enum { + ULP_TX_MEM_READ = 2, + ULP_TX_MEM_WRITE = 3, + ULP_TX_PKT = 4 +}; + +enum { + ULP_TX_SC_NOOP = 0x80, + ULP_TX_SC_IMM = 0x81, + ULP_TX_SC_DSGL = 0x82, + ULP_TX_SC_ISGL = 0x83 +}; + +struct ulptx_sge_pair { + __be32 len[2]; + __be64 addr[2]; +}; + +struct ulptx_sgl { + __be32 cmd_nsge; +#define ULPTX_CMD(x) ((x) << 24) +#define ULPTX_NSGE(x) ((x) << 0) + __be32 len0; + __be64 addr0; + struct ulptx_sge_pair sge[0]; +}; + +struct ulp_mem_io { + WR_HDR; + __be32 cmd; +#define ULP_MEMIO_ORDER(x) ((x) << 23) + __be32 len16; /* command length */ + __be32 dlen; /* data length in 32-byte units */ +#define ULP_MEMIO_DATA_LEN(x) ((x) << 0) + __be32 lock_addr; +#define ULP_MEMIO_ADDR(x) ((x) << 0) +#define ULP_MEMIO_LOCK(x) ((x) << 31) +}; + +#endif /* __T4_MSG_H */ diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h new file mode 100644 index 000000000000..5ed56483cbc2 --- /dev/null +++ b/drivers/net/cxgb4/t4_regs.h @@ -0,0 +1,878 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __T4_REGS_H +#define __T4_REGS_H + +#define MYPF_BASE 0x1b000 +#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr)) + +#define PF0_BASE 0x1e000 +#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr)) + +#define PF_STRIDE 0x400 +#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE) +#define PF_REG(idx, reg) (PF_BASE(idx) + (reg)) + +#define MYPORT_BASE 0x1c000 +#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr)) + +#define PORT0_BASE 0x20000 +#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr)) + +#define PORT_STRIDE 0x2000 +#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE) +#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg)) + +#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR) +#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx) + +#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) +#define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) +#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) +#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) + +#define SGE_PF_KDOORBELL 0x0 +#define QID_MASK 0xffff8000U +#define QID_SHIFT 15 +#define QID(x) ((x) << QID_SHIFT) +#define DBPRIO 0x00004000U +#define PIDX_MASK 0x00003fffU +#define PIDX_SHIFT 0 +#define PIDX(x) ((x) << PIDX_SHIFT) + +#define SGE_PF_GTS 0x4 +#define INGRESSQID_MASK 0xffff0000U +#define INGRESSQID_SHIFT 16 +#define INGRESSQID(x) ((x) << INGRESSQID_SHIFT) +#define TIMERREG_MASK 0x0000e000U +#define TIMERREG_SHIFT 13 +#define TIMERREG(x) ((x) << TIMERREG_SHIFT) +#define SEINTARM_MASK 0x00001000U +#define SEINTARM_SHIFT 12 +#define SEINTARM(x) ((x) << SEINTARM_SHIFT) +#define CIDXINC_MASK 0x00000fffU +#define CIDXINC_SHIFT 0 +#define CIDXINC(x) ((x) << CIDXINC_SHIFT) + +#define SGE_CONTROL 0x1008 +#define DCASYSTYPE 0x00080000U +#define RXPKTCPLMODE 0x00040000U +#define EGRSTATUSPAGESIZE 0x00020000U +#define PKTSHIFT_MASK 0x00001c00U +#define PKTSHIFT_SHIFT 10 +#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) +#define INGPCIEBOUNDARY_MASK 0x00000380U +#define INGPCIEBOUNDARY_SHIFT 7 +#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) +#define INGPADBOUNDARY_MASK 0x00000070U +#define INGPADBOUNDARY_SHIFT 4 +#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) +#define EGRPCIEBOUNDARY_MASK 0x0000000eU +#define EGRPCIEBOUNDARY_SHIFT 1 +#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT) +#define GLOBALENABLE 0x00000001U + +#define SGE_HOST_PAGE_SIZE 0x100c +#define HOSTPAGESIZEPF0_MASK 0x0000000fU +#define HOSTPAGESIZEPF0_SHIFT 0 +#define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT) + +#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010 +#define QUEUESPERPAGEPF0_MASK 0x0000000fU +#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) + +#define SGE_INT_CAUSE1 0x1024 +#define SGE_INT_CAUSE2 0x1030 +#define SGE_INT_CAUSE3 0x103c +#define ERR_FLM_DBP 0x80000000U +#define ERR_FLM_IDMA1 0x40000000U +#define ERR_FLM_IDMA0 0x20000000U +#define ERR_FLM_HINT 0x10000000U +#define ERR_PCIE_ERROR3 0x08000000U +#define ERR_PCIE_ERROR2 0x04000000U +#define ERR_PCIE_ERROR1 0x02000000U +#define ERR_PCIE_ERROR0 0x01000000U +#define ERR_TIMER_ABOVE_MAX_QID 0x00800000U +#define ERR_CPL_EXCEED_IQE_SIZE 0x00400000U +#define ERR_INVALID_CIDX_INC 0x00200000U +#define ERR_ITP_TIME_PAUSED 
0x00100000U +#define ERR_CPL_OPCODE_0 0x00080000U +#define ERR_DROPPED_DB 0x00040000U +#define ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U +#define ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U +#define ERR_BAD_DB_PIDX3 0x00008000U +#define ERR_BAD_DB_PIDX2 0x00004000U +#define ERR_BAD_DB_PIDX1 0x00002000U +#define ERR_BAD_DB_PIDX0 0x00001000U +#define ERR_ING_PCIE_CHAN 0x00000800U +#define ERR_ING_CTXT_PRIO 0x00000400U +#define ERR_EGR_CTXT_PRIO 0x00000200U +#define DBFIFO_HP_INT 0x00000100U +#define DBFIFO_LP_INT 0x00000080U +#define REG_ADDRESS_ERR 0x00000040U +#define INGRESS_SIZE_ERR 0x00000020U +#define EGRESS_SIZE_ERR 0x00000010U +#define ERR_INV_CTXT3 0x00000008U +#define ERR_INV_CTXT2 0x00000004U +#define ERR_INV_CTXT1 0x00000002U +#define ERR_INV_CTXT0 0x00000001U + +#define SGE_INT_ENABLE3 0x1040 +#define SGE_FL_BUFFER_SIZE0 0x1044 +#define SGE_FL_BUFFER_SIZE1 0x1048 +#define SGE_INGRESS_RX_THRESHOLD 0x10a0 +#define THRESHOLD_0_MASK 0x3f000000U +#define THRESHOLD_0_SHIFT 24 +#define THRESHOLD_0(x) ((x) << THRESHOLD_0_SHIFT) +#define THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT) +#define THRESHOLD_1_MASK 0x003f0000U +#define THRESHOLD_1_SHIFT 16 +#define THRESHOLD_1(x) ((x) << THRESHOLD_1_SHIFT) +#define THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT) +#define THRESHOLD_2_MASK 0x00003f00U +#define THRESHOLD_2_SHIFT 8 +#define THRESHOLD_2(x) ((x) << THRESHOLD_2_SHIFT) +#define THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT) +#define THRESHOLD_3_MASK 0x0000003fU +#define THRESHOLD_3_SHIFT 0 +#define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT) +#define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT) + +#define SGE_TIMER_VALUE_0_AND_1 0x10b8 +#define TIMERVALUE0_MASK 0xffff0000U +#define TIMERVALUE0_SHIFT 16 +#define TIMERVALUE0(x) ((x) << TIMERVALUE0_SHIFT) +#define TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT) +#define TIMERVALUE1_MASK 0x0000ffffU +#define TIMERVALUE1_SHIFT 0 +#define TIMERVALUE1(x) ((x) << TIMERVALUE1_SHIFT) +#define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT) + +#define SGE_TIMER_VALUE_2_AND_3 0x10bc +#define SGE_TIMER_VALUE_4_AND_5 0x10c0 +#define SGE_DEBUG_INDEX 0x10cc +#define SGE_DEBUG_DATA_HIGH 0x10d0 +#define SGE_DEBUG_DATA_LOW 0x10d4 +#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 + +#define PCIE_PF_CLI 0x44 +#define PCIE_INT_CAUSE 0x3004 +#define UNXSPLCPLERR 0x20000000U +#define PCIEPINT 0x10000000U +#define PCIESINT 0x08000000U +#define RPLPERR 0x04000000U +#define RXWRPERR 0x02000000U +#define RXCPLPERR 0x01000000U +#define PIOTAGPERR 0x00800000U +#define MATAGPERR 0x00400000U +#define INTXCLRPERR 0x00200000U +#define FIDPERR 0x00100000U +#define CFGSNPPERR 0x00080000U +#define HRSPPERR 0x00040000U +#define HREQPERR 0x00020000U +#define HCNTPERR 0x00010000U +#define DRSPPERR 0x00008000U +#define DREQPERR 0x00004000U +#define DCNTPERR 0x00002000U +#define CRSPPERR 0x00001000U +#define CREQPERR 0x00000800U +#define CCNTPERR 0x00000400U +#define TARTAGPERR 0x00000200U +#define PIOREQPERR 0x00000100U +#define PIOCPLPERR 0x00000080U +#define MSIXDIPERR 0x00000040U +#define MSIXDATAPERR 0x00000020U +#define MSIXADDRHPERR 0x00000010U +#define MSIXADDRLPERR 0x00000008U +#define MSIDATAPERR 0x00000004U +#define MSIADDRHPERR 0x00000002U +#define MSIADDRLPERR 0x00000001U + +#define PCIE_NONFAT_ERR 0x3010 +#define PCIE_MEM_ACCESS_BASE_WIN 0x3068 +#define PCIEOFST_MASK 0xfffffc00U +#define BIR_MASK 0x00000300U +#define BIR_SHIFT 8 +#define BIR(x) ((x) << 
BIR_SHIFT) +#define WINDOW_MASK 0x000000ffU +#define WINDOW_SHIFT 0 +#define WINDOW(x) ((x) << WINDOW_SHIFT) + +#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908 +#define RNPP 0x80000000U +#define RPCP 0x20000000U +#define RCIP 0x08000000U +#define RCCP 0x04000000U +#define RFTP 0x00800000U +#define PTRP 0x00100000U + +#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4 +#define TPCP 0x40000000U +#define TNPP 0x20000000U +#define TFTP 0x10000000U +#define TCAP 0x08000000U +#define TCIP 0x04000000U +#define RCAP 0x02000000U +#define PLUP 0x00800000U +#define PLDN 0x00400000U +#define OTDD 0x00200000U +#define GTRP 0x00100000U +#define RDPE 0x00040000U +#define TDCE 0x00020000U +#define TDUE 0x00010000U + +#define MC_INT_CAUSE 0x7518 +#define ECC_UE_INT_CAUSE 0x00000004U +#define ECC_CE_INT_CAUSE 0x00000002U +#define PERR_INT_CAUSE 0x00000001U + +#define MC_ECC_STATUS 0x751c +#define ECC_CECNT_MASK 0xffff0000U +#define ECC_CECNT_SHIFT 16 +#define ECC_CECNT(x) ((x) << ECC_CECNT_SHIFT) +#define ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT) +#define ECC_UECNT_MASK 0x0000ffffU +#define ECC_UECNT_SHIFT 0 +#define ECC_UECNT(x) ((x) << ECC_UECNT_SHIFT) +#define ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT) + +#define MC_BIST_CMD 0x7600 +#define START_BIST 0x80000000U +#define BIST_CMD_GAP_MASK 0x0000ff00U +#define BIST_CMD_GAP_SHIFT 8 +#define BIST_CMD_GAP(x) ((x) << BIST_CMD_GAP_SHIFT) +#define BIST_OPCODE_MASK 0x00000003U +#define BIST_OPCODE_SHIFT 0 +#define BIST_OPCODE(x) ((x) << BIST_OPCODE_SHIFT) + +#define MC_BIST_CMD_ADDR 0x7604 +#define MC_BIST_CMD_LEN 0x7608 +#define MC_BIST_DATA_PATTERN 0x760c +#define BIST_DATA_TYPE_MASK 0x0000000fU +#define BIST_DATA_TYPE_SHIFT 0 +#define BIST_DATA_TYPE(x) ((x) << BIST_DATA_TYPE_SHIFT) + +#define MC_BIST_STATUS_RDATA 0x7688 + +#define MA_EXT_MEMORY_BAR 0x77c8 +#define EXT_MEM_SIZE_MASK 0x00000fffU +#define EXT_MEM_SIZE_SHIFT 0 +#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT) + +#define MA_TARGET_MEM_ENABLE 0x77d8 +#define EXT_MEM_ENABLE 0x00000004U +#define EDRAM1_ENABLE 0x00000002U +#define EDRAM0_ENABLE 0x00000001U + +#define MA_INT_CAUSE 0x77e0 +#define MEM_PERR_INT_CAUSE 0x00000002U +#define MEM_WRAP_INT_CAUSE 0x00000001U + +#define MA_INT_WRAP_STATUS 0x77e4 +#define MEM_WRAP_ADDRESS_MASK 0xfffffff0U +#define MEM_WRAP_ADDRESS_SHIFT 4 +#define MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT) +#define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU +#define MEM_WRAP_CLIENT_NUM_SHIFT 0 +#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) + +#define MA_PARITY_ERROR_STATUS 0x77f4 + +#define EDC_0_BASE_ADDR 0x7900 + +#define EDC_BIST_CMD 0x7904 +#define EDC_BIST_CMD_ADDR 0x7908 +#define EDC_BIST_CMD_LEN 0x790c +#define EDC_BIST_DATA_PATTERN 0x7910 +#define EDC_BIST_STATUS_RDATA 0x7928 +#define EDC_INT_CAUSE 0x7978 +#define ECC_UE_PAR 0x00000020U +#define ECC_CE_PAR 0x00000010U +#define PERR_PAR_CAUSE 0x00000008U + +#define EDC_ECC_STATUS 0x797c + +#define EDC_1_BASE_ADDR 0x7980 + +#define CIM_PF_MAILBOX_DATA 0x240 +#define CIM_PF_MAILBOX_CTRL 0x280 +#define MBMSGVALID 0x00000008U +#define MBINTREQ 0x00000004U +#define MBOWNER_MASK 0x00000003U +#define MBOWNER_SHIFT 0 +#define MBOWNER(x) ((x) << MBOWNER_SHIFT) +#define MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT) + +#define CIM_PF_HOST_INT_CAUSE 0x28c +#define MBMSGRDYINT 0x00080000U + +#define CIM_HOST_INT_CAUSE 0x7b2c +#define TIEQOUTPARERRINT 0x00100000U 
+#define TIEQINPARERRINT 0x00080000U +#define MBHOSTPARERR 0x00040000U +#define MBUPPARERR 0x00020000U +#define IBQPARERR 0x0001f800U +#define IBQTP0PARERR 0x00010000U +#define IBQTP1PARERR 0x00008000U +#define IBQULPPARERR 0x00004000U +#define IBQSGELOPARERR 0x00002000U +#define IBQSGEHIPARERR 0x00001000U +#define IBQNCSIPARERR 0x00000800U +#define OBQPARERR 0x000007e0U +#define OBQULP0PARERR 0x00000400U +#define OBQULP1PARERR 0x00000200U +#define OBQULP2PARERR 0x00000100U +#define OBQULP3PARERR 0x00000080U +#define OBQSGEPARERR 0x00000040U +#define OBQNCSIPARERR 0x00000020U +#define PREFDROPINT 0x00000002U +#define UPACCNONZERO 0x00000001U + +#define CIM_HOST_UPACC_INT_CAUSE 0x7b34 +#define EEPROMWRINT 0x40000000U +#define TIMEOUTMAINT 0x20000000U +#define TIMEOUTINT 0x10000000U +#define RSPOVRLOOKUPINT 0x08000000U +#define REQOVRLOOKUPINT 0x04000000U +#define BLKWRPLINT 0x02000000U +#define BLKRDPLINT 0x01000000U +#define SGLWRPLINT 0x00800000U +#define SGLRDPLINT 0x00400000U +#define BLKWRCTLINT 0x00200000U +#define BLKRDCTLINT 0x00100000U +#define SGLWRCTLINT 0x00080000U +#define SGLRDCTLINT 0x00040000U +#define BLKWREEPROMINT 0x00020000U +#define BLKRDEEPROMINT 0x00010000U +#define SGLWREEPROMINT 0x00008000U +#define SGLRDEEPROMINT 0x00004000U +#define BLKWRFLASHINT 0x00002000U +#define BLKRDFLASHINT 0x00001000U +#define SGLWRFLASHINT 0x00000800U +#define SGLRDFLASHINT 0x00000400U +#define BLKWRBOOTINT 0x00000200U +#define BLKRDBOOTINT 0x00000100U +#define SGLWRBOOTINT 0x00000080U +#define SGLRDBOOTINT 0x00000040U +#define ILLWRBEINT 0x00000020U +#define ILLRDBEINT 0x00000010U +#define ILLRDINT 0x00000008U +#define ILLWRINT 0x00000004U +#define ILLTRANSINT 0x00000002U +#define RSVDSPACEINT 0x00000001U + +#define TP_OUT_CONFIG 0x7d04 +#define VLANEXTENABLE_MASK 0x0000f000U +#define VLANEXTENABLE_SHIFT 12 + +#define TP_PARA_REG2 0x7d68 +#define MAXRXDATA_MASK 0xffff0000U +#define MAXRXDATA_SHIFT 16 +#define MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT) + +#define TP_TIMER_RESOLUTION 0x7d90 +#define TIMERRESOLUTION_MASK 0x00ff0000U +#define TIMERRESOLUTION_SHIFT 16 +#define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT) + +#define TP_SHIFT_CNT 0x7dc0 + +#define TP_CCTRL_TABLE 0x7ddc +#define TP_MTU_TABLE 0x7de4 +#define MTUINDEX_MASK 0xff000000U +#define MTUINDEX_SHIFT 24 +#define MTUINDEX(x) ((x) << MTUINDEX_SHIFT) +#define MTUWIDTH_MASK 0x000f0000U +#define MTUWIDTH_SHIFT 16 +#define MTUWIDTH(x) ((x) << MTUWIDTH_SHIFT) +#define MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT) +#define MTUVALUE_MASK 0x00003fffU +#define MTUVALUE_SHIFT 0 +#define MTUVALUE(x) ((x) << MTUVALUE_SHIFT) +#define MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT) + +#define TP_RSS_LKP_TABLE 0x7dec +#define LKPTBLROWVLD 0x80000000U +#define LKPTBLQUEUE1_MASK 0x000ffc00U +#define LKPTBLQUEUE1_SHIFT 10 +#define LKPTBLQUEUE1(x) ((x) << LKPTBLQUEUE1_SHIFT) +#define LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT) +#define LKPTBLQUEUE0_MASK 0x000003ffU +#define LKPTBLQUEUE0_SHIFT 0 +#define LKPTBLQUEUE0(x) ((x) << LKPTBLQUEUE0_SHIFT) +#define LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT) + +#define TP_PIO_ADDR 0x7e40 +#define TP_PIO_DATA 0x7e44 +#define TP_MIB_INDEX 0x7e50 +#define TP_MIB_DATA 0x7e54 +#define TP_INT_CAUSE 0x7e74 +#define FLMTXFLSTEMPTY 0x40000000U + +#define TP_INGRESS_CONFIG 0x141 +#define VNIC 0x00000800U +#define CSUM_HAS_PSEUDO_HDR 0x00000400U +#define RM_OVLAN 0x00000200U +#define 
LOOKUPEVERYPKT 0x00000100U + +#define TP_MIB_MAC_IN_ERR_0 0x0 +#define TP_MIB_TCP_OUT_RST 0xc +#define TP_MIB_TCP_IN_SEG_HI 0x10 +#define TP_MIB_TCP_IN_SEG_LO 0x11 +#define TP_MIB_TCP_OUT_SEG_HI 0x12 +#define TP_MIB_TCP_OUT_SEG_LO 0x13 +#define TP_MIB_TCP_RXT_SEG_HI 0x14 +#define TP_MIB_TCP_RXT_SEG_LO 0x15 +#define TP_MIB_TNL_CNG_DROP_0 0x18 +#define TP_MIB_TCP_V6IN_ERR_0 0x28 +#define TP_MIB_TCP_V6OUT_RST 0x2c +#define TP_MIB_OFD_ARP_DROP 0x36 +#define TP_MIB_TNL_DROP_0 0x44 +#define TP_MIB_OFD_VLN_DROP_0 0x58 + +#define ULP_TX_INT_CAUSE 0x8dcc +#define PBL_BOUND_ERR_CH3 0x80000000U +#define PBL_BOUND_ERR_CH2 0x40000000U +#define PBL_BOUND_ERR_CH1 0x20000000U +#define PBL_BOUND_ERR_CH0 0x10000000U + +#define PM_RX_INT_CAUSE 0x8fdc +#define ZERO_E_CMD_ERROR 0x00400000U +#define PMRX_FRAMING_ERROR 0x003ffff0U +#define OCSPI_PAR_ERROR 0x00000008U +#define DB_OPTIONS_PAR_ERROR 0x00000004U +#define IESPI_PAR_ERROR 0x00000002U +#define E_PCMD_PAR_ERROR 0x00000001U + +#define PM_TX_INT_CAUSE 0x8ffc +#define PCMD_LEN_OVFL0 0x80000000U +#define PCMD_LEN_OVFL1 0x40000000U +#define PCMD_LEN_OVFL2 0x20000000U +#define ZERO_C_CMD_ERROR 0x10000000U +#define PMTX_FRAMING_ERROR 0x0ffffff0U +#define OESPI_PAR_ERROR 0x00000008U +#define ICSPI_PAR_ERROR 0x00000002U +#define C_PCMD_PAR_ERROR 0x00000001U + +#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400 +#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404 +#define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408 +#define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c +#define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410 +#define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414 +#define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418 +#define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c +#define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420 +#define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424 +#define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428 +#define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c +#define MPS_PORT_STAT_TX_PORT_64B_L 0x430 +#define MPS_PORT_STAT_TX_PORT_64B_H 0x434 +#define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438 +#define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c +#define MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440 +#define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444 +#define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448 +#define MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c +#define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450 +#define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454 +#define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458 +#define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c +#define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460 +#define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464 +#define MPS_PORT_STAT_TX_PORT_DROP_L 0x468 +#define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c +#define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470 +#define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474 +#define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478 +#define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c +#define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480 +#define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484 +#define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488 +#define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c +#define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490 +#define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494 +#define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498 +#define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c +#define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0 +#define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4 +#define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8 +#define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac +#define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0 +#define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4 +#define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0 +#define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4 +#define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8 +#define 
MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc +#define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0 +#define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4 +#define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8 +#define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc +#define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0 +#define MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4 +#define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8 +#define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec +#define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0 +#define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4 +#define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8 +#define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc +#define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500 +#define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504 +#define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508 +#define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c +#define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510 +#define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514 +#define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518 +#define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c +#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520 +#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524 +#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528 +#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540 +#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544 +#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548 +#define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c +#define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550 +#define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554 +#define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558 +#define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c +#define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560 +#define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564 +#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568 +#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c +#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570 +#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574 +#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578 +#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c +#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580 +#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584 +#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588 +#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c +#define MPS_PORT_STAT_RX_PORT_64B_L 0x590 +#define MPS_PORT_STAT_RX_PORT_64B_H 0x594 +#define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598 +#define MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c +#define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0 +#define MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4 +#define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8 +#define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac +#define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0 +#define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4 +#define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8 +#define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc +#define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0 +#define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4 +#define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8 +#define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc +#define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0 +#define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4 +#define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8 +#define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc +#define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0 +#define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4 +#define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8 +#define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec +#define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0 +#define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4 +#define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8 +#define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc +#define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600 +#define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604 +#define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608 +#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c +#define 
MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 +#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 +#define MPS_CMN_CTL 0x9000 +#define NUMPORTS_MASK 0x00000003U +#define NUMPORTS_SHIFT 0 +#define NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT) + +#define MPS_INT_CAUSE 0x9008 +#define STATINT 0x00000020U +#define TXINT 0x00000010U +#define RXINT 0x00000008U +#define TRCINT 0x00000004U +#define CLSINT 0x00000002U +#define PLINT 0x00000001U + +#define MPS_TX_INT_CAUSE 0x9408 +#define PORTERR 0x00010000U +#define FRMERR 0x00008000U +#define SECNTERR 0x00004000U +#define BUBBLE 0x00002000U +#define TXDESCFIFO 0x00001e00U +#define TXDATAFIFO 0x000001e0U +#define NCSIFIFO 0x00000010U +#define TPFIFO 0x0000000fU + +#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614 +#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620 +#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c + +#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640 +#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644 +#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648 +#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c +#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650 +#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654 +#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658 +#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c +#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660 +#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664 +#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668 +#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c +#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670 +#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674 +#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678 +#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c +#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680 +#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684 +#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688 +#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c +#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690 +#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694 +#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698 +#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c +#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0 +#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4 +#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8 +#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac +#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0 +#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4 +#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8 +#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc +#define MPS_TRC_CFG 0x9800 +#define TRCFIFOEMPTY 0x00000010U +#define TRCIGNOREDROPINPUT 0x00000008U +#define TRCKEEPDUPLICATES 0x00000004U +#define TRCEN 0x00000002U +#define TRCMULTIFILTER 0x00000001U + +#define MPS_TRC_RSS_CONTROL 0x9808 +#define RSSCONTROL_MASK 0x00ff0000U +#define RSSCONTROL_SHIFT 16 +#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT) +#define QUEUENUMBER_MASK 0x0000ffffU +#define QUEUENUMBER_SHIFT 0 +#define QUEUENUMBER(x) ((x) << QUEUENUMBER_SHIFT) + +#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810 +#define TFINVERTMATCH 0x01000000U +#define TFPKTTOOLARGE 0x00800000U +#define TFEN 0x00400000U +#define TFPORT_MASK 0x003c0000U +#define TFPORT_SHIFT 18 +#define TFPORT(x) ((x) << TFPORT_SHIFT) +#define TFPORT_GET(x) (((x) & TFPORT_MASK) >> TFPORT_SHIFT) +#define TFDROP 0x00020000U +#define TFSOPEOPERR 0x00010000U +#define TFLENGTH_MASK 0x00001f00U +#define TFLENGTH_SHIFT 8 +#define TFLENGTH(x) ((x) << TFLENGTH_SHIFT) +#define TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT) +#define TFOFFSET_MASK 0x0000001fU +#define TFOFFSET_SHIFT 0 +#define TFOFFSET(x) ((x) 
<< TFOFFSET_SHIFT) +#define TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT) + +#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820 +#define TFMINPKTSIZE_MASK 0x01ff0000U +#define TFMINPKTSIZE_SHIFT 16 +#define TFMINPKTSIZE(x) ((x) << TFMINPKTSIZE_SHIFT) +#define TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT) +#define TFCAPTUREMAX_MASK 0x00003fffU +#define TFCAPTUREMAX_SHIFT 0 +#define TFCAPTUREMAX(x) ((x) << TFCAPTUREMAX_SHIFT) +#define TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT) + +#define MPS_TRC_INT_CAUSE 0x985c +#define MISCPERR 0x00000100U +#define PKTFIFO 0x000000f0U +#define FILTMEM 0x0000000fU + +#define MPS_TRC_FILTER0_MATCH 0x9c00 +#define MPS_TRC_FILTER0_DONT_CARE 0x9c80 +#define MPS_TRC_FILTER1_MATCH 0x9d00 +#define MPS_CLS_INT_CAUSE 0xd028 +#define PLERRENB 0x00000008U +#define HASHSRAM 0x00000004U +#define MATCHTCAM 0x00000002U +#define MATCHSRAM 0x00000001U + +#define MPS_RX_PERR_INT_CAUSE 0x11074 + +#define CPL_INTR_CAUSE 0x19054 +#define CIM_OP_MAP_PERR 0x00000020U +#define CIM_OVFL_ERROR 0x00000010U +#define TP_FRAMING_ERROR 0x00000008U +#define SGE_FRAMING_ERROR 0x00000004U +#define CIM_FRAMING_ERROR 0x00000002U +#define ZERO_SWITCH_ERROR 0x00000001U + +#define SMB_INT_CAUSE 0x19090 +#define MSTTXFIFOPARINT 0x00200000U +#define MSTRXFIFOPARINT 0x00100000U +#define SLVFIFOPARINT 0x00080000U + +#define ULP_RX_INT_CAUSE 0x19158 +#define ULP_RX_ISCSI_TAGMASK 0x19164 +#define ULP_RX_ISCSI_PSZ 0x19168 +#define HPZ3_MASK 0x0f000000U +#define HPZ3_SHIFT 24 +#define HPZ3(x) ((x) << HPZ3_SHIFT) +#define HPZ2_MASK 0x000f0000U +#define HPZ2_SHIFT 16 +#define HPZ2(x) ((x) << HPZ2_SHIFT) +#define HPZ1_MASK 0x00000f00U +#define HPZ1_SHIFT 8 +#define HPZ1(x) ((x) << HPZ1_SHIFT) +#define HPZ0_MASK 0x0000000fU +#define HPZ0_SHIFT 0 +#define HPZ0(x) ((x) << HPZ0_SHIFT) + +#define ULP_RX_TDDP_PSZ 0x19178 + +#define SF_DATA 0x193f8 +#define SF_OP 0x193fc +#define BUSY 0x80000000U +#define SF_LOCK 0x00000010U +#define SF_CONT 0x00000008U +#define BYTECNT_MASK 0x00000006U +#define BYTECNT_SHIFT 1 +#define BYTECNT(x) ((x) << BYTECNT_SHIFT) +#define OP_WR 0x00000001U + +#define PL_PF_INT_CAUSE 0x3c0 +#define PFSW 0x00000008U +#define PFSGE 0x00000004U +#define PFCIM 0x00000002U +#define PFMPS 0x00000001U + +#define PL_PF_INT_ENABLE 0x3c4 +#define PL_PF_CTL 0x3c8 +#define SWINT 0x00000001U + +#define PL_WHOAMI 0x19400 +#define SOURCEPF_MASK 0x00000700U +#define SOURCEPF_SHIFT 8 +#define SOURCEPF(x) ((x) << SOURCEPF_SHIFT) +#define SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT) +#define ISVF 0x00000080U +#define VFID_MASK 0x0000007fU +#define VFID_SHIFT 0 +#define VFID(x) ((x) << VFID_SHIFT) +#define VFID_GET(x) (((x) & VFID_MASK) >> VFID_SHIFT) + +#define PL_INT_CAUSE 0x1940c +#define ULP_TX 0x08000000U +#define SGE 0x04000000U +#define HMA 0x02000000U +#define CPL_SWITCH 0x01000000U +#define ULP_RX 0x00800000U +#define PM_RX 0x00400000U +#define PM_TX 0x00200000U +#define MA 0x00100000U +#define TP 0x00080000U +#define LE 0x00040000U +#define EDC1 0x00020000U +#define EDC0 0x00010000U +#define MC 0x00008000U +#define PCIE 0x00004000U +#define PMU 0x00002000U +#define XGMAC_KR1 0x00001000U +#define XGMAC_KR0 0x00000800U +#define XGMAC1 0x00000400U +#define XGMAC0 0x00000200U +#define SMB 0x00000100U +#define SF 0x00000080U +#define PL 0x00000040U +#define NCSI 0x00000020U +#define MPS 0x00000010U +#define MI 0x00000008U +#define DBG 0x00000004U +#define I2CM 0x00000002U +#define CIM 0x00000001U + +#define PL_INT_MAP0 0x19414 +#define 
PL_RST 0x19428 +#define PIORST 0x00000002U +#define PIORSTMODE 0x00000001U + +#define PL_PL_INT_CAUSE 0x19430 +#define FATALPERR 0x00000010U +#define PERRVFID 0x00000001U + +#define PL_REV 0x1943c + +#define LE_DB_CONFIG 0x19c04 +#define HASHEN 0x00100000U + +#define LE_DB_SERVER_INDEX 0x19c18 +#define LE_DB_ACT_CNT_IPV4 0x19c20 +#define LE_DB_ACT_CNT_IPV6 0x19c24 + +#define LE_DB_INT_CAUSE 0x19c3c +#define REQQPARERR 0x00010000U +#define UNKNOWNCMD 0x00008000U +#define PARITYERR 0x00000040U +#define LIPMISS 0x00000020U +#define LIP0 0x00000010U + +#define LE_DB_TID_HASHBASE 0x19df8 + +#define NCSI_INT_CAUSE 0x1a0d8 +#define CIM_DM_PRTY_ERR 0x00000100U +#define MPS_DM_PRTY_ERR 0x00000080U +#define TXFIFO_PRTY_ERR 0x00000002U +#define RXFIFO_PRTY_ERR 0x00000001U + +#define XGMAC_PORT_CFG2 0x1018 +#define PATEN 0x00040000U +#define MAGICEN 0x00020000U + +#define XGMAC_PORT_MAGIC_MACID_LO 0x1024 +#define XGMAC_PORT_MAGIC_MACID_HI 0x1028 + +#define XGMAC_PORT_EPIO_DATA0 0x10c0 +#define XGMAC_PORT_EPIO_DATA1 0x10c4 +#define XGMAC_PORT_EPIO_DATA2 0x10c8 +#define XGMAC_PORT_EPIO_DATA3 0x10cc +#define XGMAC_PORT_EPIO_OP 0x10d0 +#define EPIOWR 0x00000100U +#define ADDRESS_MASK 0x000000ffU +#define ADDRESS_SHIFT 0 +#define ADDRESS(x) ((x) << ADDRESS_SHIFT) + +#define XGMAC_PORT_INT_CAUSE 0x10dc +#endif /* __T4_REGS_H */ diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h new file mode 100644 index 000000000000..3393d05a388a --- /dev/null +++ b/drivers/net/cxgb4/t4fw_api.h @@ -0,0 +1,1580 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef _T4FW_INTERFACE_H_ +#define _T4FW_INTERFACE_H_ + +#define FW_T4VF_SGE_BASE_ADDR 0x0000 +#define FW_T4VF_MPS_BASE_ADDR 0x0100 +#define FW_T4VF_PL_BASE_ADDR 0x0200 +#define FW_T4VF_MBDATA_BASE_ADDR 0x0240 +#define FW_T4VF_CIM_BASE_ADDR 0x0300 + +enum fw_wr_opcodes { + FW_FILTER_WR = 0x02, + FW_ULPTX_WR = 0x04, + FW_TP_WR = 0x05, + FW_ETH_TX_PKT_WR = 0x08, + FW_FLOWC_WR = 0x0a, + FW_OFLD_TX_DATA_WR = 0x0b, + FW_CMD_WR = 0x10, + FW_ETH_TX_PKT_VM_WR = 0x11, + FW_RI_RES_WR = 0x0c, + FW_RI_INIT_WR = 0x0d, + FW_RI_RDMA_WRITE_WR = 0x14, + FW_RI_SEND_WR = 0x15, + FW_RI_RDMA_READ_WR = 0x16, + FW_RI_RECV_WR = 0x17, + FW_RI_BIND_MW_WR = 0x18, + FW_RI_FR_NSMR_WR = 0x19, + FW_RI_INV_LSTAG_WR = 0x1a, + FW_LASTC2E_WR = 0x40 +}; + +struct fw_wr_hdr { + __be32 hi; + __be32 lo; +}; + +#define FW_WR_OP(x) ((x) << 24) +#define FW_WR_ATOMIC(x) ((x) << 23) +#define FW_WR_FLUSH(x) ((x) << 22) +#define FW_WR_COMPL(x) ((x) << 21) +#define FW_WR_IMMDLEN(x) ((x) << 0) + +#define FW_WR_EQUIQ (1U << 31) +#define FW_WR_EQUEQ (1U << 30) +#define FW_WR_FLOWID(x) ((x) << 8) +#define FW_WR_LEN16(x) ((x) << 0) + +struct fw_ulptx_wr { + __be32 op_to_compl; + __be32 flowid_len16; + u64 cookie; +}; + +struct fw_tp_wr { + __be32 op_to_immdlen; + __be32 flowid_len16; + u64 cookie; +}; + +struct fw_eth_tx_pkt_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be64 r3; +}; + +enum fw_flowc_mnem { + FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] */ + FW_FLOWC_MNEM_CH, + FW_FLOWC_MNEM_PORT, + FW_FLOWC_MNEM_IQID, + FW_FLOWC_MNEM_SNDNXT, + FW_FLOWC_MNEM_RCVNXT, + FW_FLOWC_MNEM_SNDBUF, + FW_FLOWC_MNEM_MSS, +}; + +struct fw_flowc_mnemval { + u8 mnemonic; + u8 r4[3]; + __be32 val; +}; + +struct fw_flowc_wr { + __be32 op_to_nparams; +#define FW_FLOWC_WR_NPARAMS(x) ((x) << 0) + __be32 flowid_len16; + struct fw_flowc_mnemval mnemval[0]; +}; + +struct fw_ofld_tx_data_wr { + __be32 op_to_immdlen; + __be32 flowid_len16; + __be32 plen; + __be32 tunnel_to_proxy; +#define FW_OFLD_TX_DATA_WR_TUNNEL(x) ((x) << 19) +#define FW_OFLD_TX_DATA_WR_SAVE(x) ((x) << 18) +#define FW_OFLD_TX_DATA_WR_FLUSH(x) ((x) << 17) +#define FW_OFLD_TX_DATA_WR_URGENT(x) ((x) << 16) +#define FW_OFLD_TX_DATA_WR_MORE(x) ((x) << 15) +#define FW_OFLD_TX_DATA_WR_SHOVE(x) ((x) << 14) +#define FW_OFLD_TX_DATA_WR_ULPMODE(x) ((x) << 10) +#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6) +}; + +struct fw_cmd_wr { + __be32 op_dma; +#define FW_CMD_WR_DMA (1U << 17) + __be32 len16_pkd; + __be64 cookie_daddr; +}; + +struct fw_eth_tx_pkt_vm_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be32 r3[2]; + u8 ethmacdst[6]; + u8 ethmacsrc[6]; + __be16 ethtype; + __be16 vlantci; +}; + +#define FW_CMD_MAX_TIMEOUT 3000 + +enum fw_cmd_opcodes { + FW_LDST_CMD = 0x01, + FW_RESET_CMD = 0x03, + FW_HELLO_CMD = 0x04, + FW_BYE_CMD = 0x05, + FW_INITIALIZE_CMD = 0x06, + FW_CAPS_CONFIG_CMD = 0x07, + FW_PARAMS_CMD = 0x08, + FW_PFVF_CMD = 0x09, + FW_IQ_CMD = 0x10, + FW_EQ_MNGT_CMD = 0x11, + FW_EQ_ETH_CMD = 0x12, + FW_EQ_CTRL_CMD = 0x13, + FW_EQ_OFLD_CMD = 0x21, + FW_VI_CMD = 0x14, + FW_VI_MAC_CMD = 0x15, + FW_VI_RXMODE_CMD = 0x16, + FW_VI_ENABLE_CMD = 0x17, + FW_ACL_MAC_CMD = 0x18, + FW_ACL_VLAN_CMD = 0x19, + FW_VI_STATS_CMD = 0x1a, + FW_PORT_CMD = 0x1b, + FW_PORT_STATS_CMD = 0x1c, + FW_PORT_LB_STATS_CMD = 0x1d, + FW_PORT_TRACE_CMD = 0x1e, + FW_PORT_TRACE_MMAP_CMD = 0x1f, + FW_RSS_IND_TBL_CMD = 0x20, + FW_RSS_GLB_CONFIG_CMD = 0x22, + FW_RSS_VI_CONFIG_CMD = 0x23, + FW_LASTC2E_CMD = 0x40, + FW_ERROR_CMD = 0x80, + FW_DEBUG_CMD = 0x81, +}; + +enum fw_cmd_cap { + FW_CMD_CAP_PF = 0x01, + 
FW_CMD_CAP_DMAQ = 0x02, + FW_CMD_CAP_PORT = 0x04, + FW_CMD_CAP_PORTPROMISC = 0x08, + FW_CMD_CAP_PORTSTATS = 0x10, + FW_CMD_CAP_VF = 0x80, +}; + +/* + * Generic command header flit0 + */ +struct fw_cmd_hdr { + __be32 hi; + __be32 lo; +}; + +#define FW_CMD_OP(x) ((x) << 24) +#define FW_CMD_OP_GET(x) (((x) >> 24) & 0xff) +#define FW_CMD_REQUEST (1U << 23) +#define FW_CMD_READ (1U << 22) +#define FW_CMD_WRITE (1U << 21) +#define FW_CMD_EXEC (1U << 20) +#define FW_CMD_RAMASK(x) ((x) << 20) +#define FW_CMD_RETVAL(x) ((x) << 8) +#define FW_CMD_RETVAL_GET(x) (((x) >> 8) & 0xff) +#define FW_CMD_LEN16(x) ((x) << 0) + +enum fw_ldst_addrspc { + FW_LDST_ADDRSPC_FIRMWARE = 0x0001, + FW_LDST_ADDRSPC_SGE_EGRC = 0x0008, + FW_LDST_ADDRSPC_SGE_INGC = 0x0009, + FW_LDST_ADDRSPC_SGE_FLMC = 0x000a, + FW_LDST_ADDRSPC_SGE_CONMC = 0x000b, + FW_LDST_ADDRSPC_TP_PIO = 0x0010, + FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011, + FW_LDST_ADDRSPC_TP_MIB = 0x0012, + FW_LDST_ADDRSPC_MDIO = 0x0018, + FW_LDST_ADDRSPC_MPS = 0x0020, + FW_LDST_ADDRSPC_FUNC = 0x0028 +}; + +enum fw_ldst_mps_fid { + FW_LDST_MPS_ATRB, + FW_LDST_MPS_RPLC +}; + +enum fw_ldst_func_access_ctl { + FW_LDST_FUNC_ACC_CTL_VIID, + FW_LDST_FUNC_ACC_CTL_FID +}; + +enum fw_ldst_func_mod_index { + FW_LDST_FUNC_MPS +}; + +struct fw_ldst_cmd { + __be32 op_to_addrspace; +#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0) + __be32 cycles_to_len16; + union fw_ldst { + struct fw_ldst_addrval { + __be32 addr; + __be32 val; + } addrval; + struct fw_ldst_idctxt { + __be32 physid; + __be32 msg_pkd; + __be32 ctxt_data7; + __be32 ctxt_data6; + __be32 ctxt_data5; + __be32 ctxt_data4; + __be32 ctxt_data3; + __be32 ctxt_data2; + __be32 ctxt_data1; + __be32 ctxt_data0; + } idctxt; + struct fw_ldst_mdio { + __be16 paddr_mmd; + __be16 raddr; + __be16 vctl; + __be16 rval; + } mdio; + struct fw_ldst_mps { + __be16 fid_ctl; + __be16 rplcpf_pkd; + __be32 rplc127_96; + __be32 rplc95_64; + __be32 rplc63_32; + __be32 rplc31_0; + __be32 atrb; + __be16 vlan[16]; + } mps; + struct fw_ldst_func { + u8 access_ctl; + u8 mod_index; + __be16 ctl_id; + __be32 offset; + __be64 data0; + __be64 data1; + } func; + } u; +}; + +#define FW_LDST_CMD_MSG(x) ((x) << 31) +#define FW_LDST_CMD_PADDR(x) ((x) << 8) +#define FW_LDST_CMD_MMD(x) ((x) << 0) +#define FW_LDST_CMD_FID(x) ((x) << 15) +#define FW_LDST_CMD_CTL(x) ((x) << 0) +#define FW_LDST_CMD_RPLCPF(x) ((x) << 0) + +struct fw_reset_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 val; + __be32 r3; +}; + +struct fw_hello_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 err_to_mbasyncnot; +#define FW_HELLO_CMD_ERR (1U << 31) +#define FW_HELLO_CMD_INIT (1U << 30) +#define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29) +#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28) +#define FW_HELLO_CMD_MBMASTER(x) ((x) << 24) +#define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20) + __be32 fwrev; +}; + +struct fw_bye_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +struct fw_initialize_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +enum fw_caps_config_hm { + FW_CAPS_CONFIG_HM_PCIE = 0x00000001, + FW_CAPS_CONFIG_HM_PL = 0x00000002, + FW_CAPS_CONFIG_HM_SGE = 0x00000004, + FW_CAPS_CONFIG_HM_CIM = 0x00000008, + FW_CAPS_CONFIG_HM_ULPTX = 0x00000010, + FW_CAPS_CONFIG_HM_TP = 0x00000020, + FW_CAPS_CONFIG_HM_ULPRX = 0x00000040, + FW_CAPS_CONFIG_HM_PMRX = 0x00000080, + FW_CAPS_CONFIG_HM_PMTX = 0x00000100, + FW_CAPS_CONFIG_HM_MC = 0x00000200, + FW_CAPS_CONFIG_HM_LE = 0x00000400, + FW_CAPS_CONFIG_HM_MPS = 0x00000800, + FW_CAPS_CONFIG_HM_XGMAC = 
0x00001000, + FW_CAPS_CONFIG_HM_CPLSWITCH = 0x00002000, + FW_CAPS_CONFIG_HM_T4DBG = 0x00004000, + FW_CAPS_CONFIG_HM_MI = 0x00008000, + FW_CAPS_CONFIG_HM_I2CM = 0x00010000, + FW_CAPS_CONFIG_HM_NCSI = 0x00020000, + FW_CAPS_CONFIG_HM_SMB = 0x00040000, + FW_CAPS_CONFIG_HM_MA = 0x00080000, + FW_CAPS_CONFIG_HM_EDRAM = 0x00100000, + FW_CAPS_CONFIG_HM_PMU = 0x00200000, + FW_CAPS_CONFIG_HM_UART = 0x00400000, + FW_CAPS_CONFIG_HM_SF = 0x00800000, +}; + +enum fw_caps_config_nbm { + FW_CAPS_CONFIG_NBM_IPMI = 0x00000001, + FW_CAPS_CONFIG_NBM_NCSI = 0x00000002, +}; + +enum fw_caps_config_link { + FW_CAPS_CONFIG_LINK_PPP = 0x00000001, + FW_CAPS_CONFIG_LINK_QFC = 0x00000002, + FW_CAPS_CONFIG_LINK_DCBX = 0x00000004, +}; + +enum fw_caps_config_switch { + FW_CAPS_CONFIG_SWITCH_INGRESS = 0x00000001, + FW_CAPS_CONFIG_SWITCH_EGRESS = 0x00000002, +}; + +enum fw_caps_config_nic { + FW_CAPS_CONFIG_NIC = 0x00000001, + FW_CAPS_CONFIG_NIC_VM = 0x00000002, +}; + +enum fw_caps_config_ofld { + FW_CAPS_CONFIG_OFLD = 0x00000001, +}; + +enum fw_caps_config_rdma { + FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001, + FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002, +}; + +enum fw_caps_config_iscsi { + FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001, + FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002, + FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004, + FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008, +}; + +enum fw_caps_config_fcoe { + FW_CAPS_CONFIG_FCOE_INITIATOR = 0x00000001, + FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002, +}; + +struct fw_caps_config_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 r2; + __be32 hwmbitmap; + __be16 nbmcaps; + __be16 linkcaps; + __be16 switchcaps; + __be16 r3; + __be16 niccaps; + __be16 ofldcaps; + __be16 rdmacaps; + __be16 r4; + __be16 iscsicaps; + __be16 fcoecaps; + __be32 r5; + __be64 r6; +}; + +/* + * params command mnemonics + */ +enum fw_params_mnem { + FW_PARAMS_MNEM_DEV = 1, /* device params */ + FW_PARAMS_MNEM_PFVF = 2, /* function params */ + FW_PARAMS_MNEM_REG = 3, /* limited register access */ + FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ + FW_PARAMS_MNEM_LAST +}; + +/* + * device parameters + */ +enum fw_params_param_dev { + FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */ + FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */ + FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs + * allocated by the device's + * Lookup Engine + */ + FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03, + FW_PARAMS_PARAM_DEV_INTVER_NIC = 0x04, + FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05, + FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06, + FW_PARAMS_PARAM_DEV_INTVER_RI = 0x07, + FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08, + FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09, + FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A +}; + +/* + * physical and virtual function parameters + */ +enum fw_params_param_pfvf { + FW_PARAMS_PARAM_PFVF_RWXCAPS = 0x00, + FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01, + FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02, + FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03, + FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04, + FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05, + FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06, + FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07, + FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08, + FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09, + FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A, + FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B, + FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C, + FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D, + FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E, + FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F, + FW_PARAMS_PARAM_PFVF_RQ_END = 0x10, + 
FW_PARAMS_PARAM_PFVF_PBL_START = 0x11, + FW_PARAMS_PARAM_PFVF_PBL_END = 0x12, + FW_PARAMS_PARAM_PFVF_L2T_START = 0x13, + FW_PARAMS_PARAM_PFVF_L2T_END = 0x14, + FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20, +}; + +/* + * dma queue parameters + */ +enum fw_params_param_dmaq { + FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00, + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01, + FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10, + FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11, + FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12, +}; + +#define FW_PARAMS_MNEM(x) ((x) << 24) +#define FW_PARAMS_PARAM_X(x) ((x) << 16) +#define FW_PARAMS_PARAM_Y(x) ((x) << 8) +#define FW_PARAMS_PARAM_Z(x) ((x) << 0) +#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0) +#define FW_PARAMS_PARAM_YZ(x) ((x) << 0) + +struct fw_params_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + struct fw_params_param { + __be32 mnem; + __be32 val; + } param[7]; +}; + +#define FW_PARAMS_CMD_PFN(x) ((x) << 8) +#define FW_PARAMS_CMD_VFN(x) ((x) << 0) + +struct fw_pfvf_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + __be32 niqflint_niq; + __be32 cmask_to_neq; + __be32 tc_to_nexactf; + __be32 r_caps_to_nethctrl; + __be16 nricq; + __be16 nriqp; + __be32 r4; +}; + +#define FW_PFVF_CMD_PFN(x) ((x) << 8) +#define FW_PFVF_CMD_VFN(x) ((x) << 0) + +#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20) +#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff) + +#define FW_PFVF_CMD_NIQ(x) ((x) << 0) +#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_PFVF_CMD_CMASK(x) ((x) << 24) +#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & 0xf) + +#define FW_PFVF_CMD_PMASK(x) ((x) << 20) +#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & 0xf) + +#define FW_PFVF_CMD_NEQ(x) ((x) << 0) +#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_PFVF_CMD_TC(x) ((x) << 24) +#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff) + +#define FW_PFVF_CMD_NVI(x) ((x) << 16) +#define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff) + +#define FW_PFVF_CMD_NEXACTF(x) ((x) << 0) +#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff) + +#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24) +#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff) + +#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16) +#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff) + +#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0) +#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff) + +enum fw_iq_type { + FW_IQ_TYPE_FL_INT_CAP, + FW_IQ_TYPE_NO_FL_INT_CAP +}; + +struct fw_iq_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 physiqid; + __be16 iqid; + __be16 fl0id; + __be16 fl1id; + __be32 type_to_iqandstindex; + __be16 iqdroprss_to_iqesize; + __be16 iqsize; + __be64 iqaddr; + __be32 iqns_to_fl0congen; + __be16 fl0dcaen_to_fl0cidxfthresh; + __be16 fl0size; + __be64 fl0addr; + __be32 fl1cngchmap_to_fl1congen; + __be16 fl1dcaen_to_fl1cidxfthresh; + __be16 fl1size; + __be64 fl1addr; +}; + +#define FW_IQ_CMD_PFN(x) ((x) << 8) +#define FW_IQ_CMD_VFN(x) ((x) << 0) + +#define FW_IQ_CMD_ALLOC (1U << 31) +#define FW_IQ_CMD_FREE (1U << 30) +#define FW_IQ_CMD_MODIFY (1U << 29) +#define FW_IQ_CMD_IQSTART(x) ((x) << 28) +#define FW_IQ_CMD_IQSTOP(x) ((x) << 27) + +#define FW_IQ_CMD_TYPE(x) ((x) << 29) +#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28) +#define FW_IQ_CMD_VIID(x) ((x) << 16) +#define FW_IQ_CMD_IQANDST(x) ((x) << 15) +#define FW_IQ_CMD_IQANUS(x) ((x) << 14) +#define FW_IQ_CMD_IQANUD(x) ((x) << 12) +#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0) + +#define FW_IQ_CMD_IQDROPRSS (1U << 15) +#define FW_IQ_CMD_IQGTSMODE (1U << 14) +#define 
FW_IQ_CMD_IQPCIECH(x) ((x) << 12) +#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11) +#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6) +#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4) +#define FW_IQ_CMD_IQO (1U << 3) +#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2) +#define FW_IQ_CMD_IQESIZE(x) ((x) << 0) + +#define FW_IQ_CMD_IQNS(x) ((x) << 31) +#define FW_IQ_CMD_IQRO(x) ((x) << 30) +#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28) +#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27) +#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26) +#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20) +#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15) +#define FW_IQ_CMD_FL0DBP(x) ((x) << 14) +#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13) +#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12) +#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11) +#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10) +#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9) +#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8) +#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7) +#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6) +#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4) +#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3) +#define FW_IQ_CMD_FL0PADEN (1U << 2) +#define FW_IQ_CMD_FL0PACKEN (1U << 1) +#define FW_IQ_CMD_FL0CONGEN (1U << 0) + +#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15) +#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10) +#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7) +#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4) +#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3) +#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0) + +#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20) +#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15) +#define FW_IQ_CMD_FL1DBP(x) ((x) << 14) +#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13) +#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12) +#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11) +#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10) +#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9) +#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8) +#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7) +#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6) +#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4) +#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3) +#define FW_IQ_CMD_FL1PADEN (1U << 2) +#define FW_IQ_CMD_FL1PACKEN (1U << 1) +#define FW_IQ_CMD_FL1CONGEN (1U << 0) + +#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15) +#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10) +#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7) +#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4) +#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3) +#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0) + +struct fw_eq_eth_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 eqid_pkd; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; + __be32 viid_pkd; + __be32 r8_lo; + __be64 r9; +}; + +#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8) +#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0) +#define FW_EQ_ETH_CMD_ALLOC (1U << 31) +#define FW_EQ_ETH_CMD_FREE (1U << 30) +#define FW_EQ_ETH_CMD_MODIFY (1U << 29) +#define FW_EQ_ETH_CMD_EQSTART (1U << 28) +#define FW_EQ_ETH_CMD_EQSTOP (1U << 27) + +#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0) +#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) +#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0) + +#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26) +#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25) +#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24) +#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23) +#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22) +#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20) +#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19) +#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18) +#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16) +#define 
FW_EQ_ETH_CMD_IQID(x) ((x) << 0) + +#define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31) +#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26) +#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23) +#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20) +#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19) +#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16) +#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0) + +#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16) + +struct fw_eq_ctrl_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 cmpliqid_eqid; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; +}; + +#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8) +#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0) + +#define FW_EQ_CTRL_CMD_ALLOC (1U << 31) +#define FW_EQ_CTRL_CMD_FREE (1U << 30) +#define FW_EQ_CTRL_CMD_MODIFY (1U << 29) +#define FW_EQ_CTRL_CMD_EQSTART (1U << 28) +#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27) + +#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20) +#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0) +#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) +#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26) +#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25) +#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24) +#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23) +#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22) +#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20) +#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19) +#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18) +#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16) +#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0) + +#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31) +#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26) +#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23) +#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20) +#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19) +#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16) +#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0) + +struct fw_eq_ofld_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 eqid_pkd; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; +}; + +#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8) +#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0) + +#define FW_EQ_OFLD_CMD_ALLOC (1U << 31) +#define FW_EQ_OFLD_CMD_FREE (1U << 30) +#define FW_EQ_OFLD_CMD_MODIFY (1U << 29) +#define FW_EQ_OFLD_CMD_EQSTART (1U << 28) +#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27) + +#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0) +#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff) +#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff) + +#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26) +#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25) +#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24) +#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23) +#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22) +#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20) +#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19) +#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18) +#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16) +#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0) + +#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31) +#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26) +#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23) +#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20) +#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19) +#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16) +#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0) + +/* + * Macros for VIID parsing: + * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number + */ +#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7) +#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1) +#define 
FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F) + +struct fw_vi_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 viid_pkd; + u8 mac[6]; + u8 portid_pkd; + u8 nmac; + u8 nmac0[6]; + __be16 rsssize_pkd; + u8 nmac1[6]; + __be16 r7; + u8 nmac2[6]; + __be16 r8; + u8 nmac3[6]; + __be64 r9; + __be64 r10; +}; + +#define FW_VI_CMD_PFN(x) ((x) << 8) +#define FW_VI_CMD_VFN(x) ((x) << 0) +#define FW_VI_CMD_ALLOC (1U << 31) +#define FW_VI_CMD_FREE (1U << 30) +#define FW_VI_CMD_VIID(x) ((x) << 0) +#define FW_VI_CMD_PORTID(x) ((x) << 4) +#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff) + +/* Special VI_MAC command index ids */ +#define FW_VI_MAC_ADD_MAC 0x3FF +#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE +#define FW_VI_MAC_MAC_BASED_FREE 0x3FD + +enum fw_vi_mac_smac { + FW_VI_MAC_MPS_TCAM_ENTRY, + FW_VI_MAC_MPS_TCAM_ONLY, + FW_VI_MAC_SMT_ONLY, + FW_VI_MAC_SMT_AND_MPSTCAM +}; + +enum fw_vi_mac_result { + FW_VI_MAC_R_SUCCESS, + FW_VI_MAC_R_F_NONEXISTENT_NOMEM, + FW_VI_MAC_R_SMAC_FAIL, + FW_VI_MAC_R_F_ACL_CHECK +}; + +struct fw_vi_mac_cmd { + __be32 op_to_viid; + __be32 freemacs_to_len16; + union fw_vi_mac { + struct fw_vi_mac_exact { + __be16 valid_to_idx; + u8 macaddr[6]; + } exact[7]; + struct fw_vi_mac_hash { + __be64 hashvec; + } hash; + } u; +}; + +#define FW_VI_MAC_CMD_VIID(x) ((x) << 0) +#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31) +#define FW_VI_MAC_CMD_HASHVECEN (1U << 23) +#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22) +#define FW_VI_MAC_CMD_VALID (1U << 15) +#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12) +#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10) +#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3) +#define FW_VI_MAC_CMD_IDX(x) ((x) << 0) +#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff) + +#define FW_RXMODE_MTU_NO_CHG 65535 + +struct fw_vi_rxmode_cmd { + __be32 op_to_viid; + __be32 retval_len16; + __be32 mtu_to_broadcasten; + __be32 r4_lo; +}; + +#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0) +#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16) +#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3 +#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14) +#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3 +#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12) +#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3 +#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10) + +struct fw_vi_enable_cmd { + __be32 op_to_viid; + __be32 ien_to_len16; + __be16 blinkdur; + __be16 r3; + __be32 r4; +}; + +#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0) +#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31) +#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30) +#define FW_VI_ENABLE_CMD_LED (1U << 29) + +/* VI VF stats offset definitions */ +#define VI_VF_NUM_STATS 16 +enum fw_vi_stats_vf_index { + FW_VI_VF_STAT_TX_BCAST_BYTES_IX, + FW_VI_VF_STAT_TX_BCAST_FRAMES_IX, + FW_VI_VF_STAT_TX_MCAST_BYTES_IX, + FW_VI_VF_STAT_TX_MCAST_FRAMES_IX, + FW_VI_VF_STAT_TX_UCAST_BYTES_IX, + FW_VI_VF_STAT_TX_UCAST_FRAMES_IX, + FW_VI_VF_STAT_TX_DROP_FRAMES_IX, + FW_VI_VF_STAT_TX_OFLD_BYTES_IX, + FW_VI_VF_STAT_TX_OFLD_FRAMES_IX, + FW_VI_VF_STAT_RX_BCAST_BYTES_IX, + FW_VI_VF_STAT_RX_BCAST_FRAMES_IX, + FW_VI_VF_STAT_RX_MCAST_BYTES_IX, + FW_VI_VF_STAT_RX_MCAST_FRAMES_IX, + FW_VI_VF_STAT_RX_UCAST_BYTES_IX, + FW_VI_VF_STAT_RX_UCAST_FRAMES_IX, + FW_VI_VF_STAT_RX_ERR_FRAMES_IX +}; + +/* VI PF stats offset definitions */ +#define VI_PF_NUM_STATS 17 +enum fw_vi_stats_pf_index { + FW_VI_PF_STAT_TX_BCAST_BYTES_IX, + FW_VI_PF_STAT_TX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_MCAST_BYTES_IX, + FW_VI_PF_STAT_TX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_UCAST_BYTES_IX, + FW_VI_PF_STAT_TX_UCAST_FRAMES_IX, + 
FW_VI_PF_STAT_TX_OFLD_BYTES_IX, + FW_VI_PF_STAT_TX_OFLD_FRAMES_IX, + FW_VI_PF_STAT_RX_BYTES_IX, + FW_VI_PF_STAT_RX_FRAMES_IX, + FW_VI_PF_STAT_RX_BCAST_BYTES_IX, + FW_VI_PF_STAT_RX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_MCAST_BYTES_IX, + FW_VI_PF_STAT_RX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_UCAST_BYTES_IX, + FW_VI_PF_STAT_RX_UCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_ERR_FRAMES_IX +}; + +struct fw_vi_stats_cmd { + __be32 op_to_viid; + __be32 retval_len16; + union fw_vi_stats { + struct fw_vi_stats_ctl { + __be16 nstats_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_vi_stats_pf { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_pf_bytes; + __be64 rx_pf_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } pf; + struct fw_vi_stats_vf { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_drop_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } vf; + } u; +}; + +#define FW_VI_STATS_CMD_VIID(x) ((x) << 0) +#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12) +#define FW_VI_STATS_CMD_IX(x) ((x) << 0) + +struct fw_acl_mac_cmd { + __be32 op_to_vfn; + __be32 en_to_len16; + u8 nmac; + u8 r3[7]; + __be16 r4; + u8 macaddr0[6]; + __be16 r5; + u8 macaddr1[6]; + __be16 r6; + u8 macaddr2[6]; + __be16 r7; + u8 macaddr3[6]; +}; + +#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8) +#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0) +#define FW_ACL_MAC_CMD_EN(x) ((x) << 31) + +struct fw_acl_vlan_cmd { + __be32 op_to_vfn; + __be32 en_to_len16; + u8 nvlan; + u8 dropnovlan_fm; + u8 r3_lo[6]; + __be16 vlanid[16]; +}; + +#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8) +#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0) +#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31) +#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7) +#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6) + +enum fw_port_cap { + FW_PORT_CAP_SPEED_100M = 0x0001, + FW_PORT_CAP_SPEED_1G = 0x0002, + FW_PORT_CAP_SPEED_2_5G = 0x0004, + FW_PORT_CAP_SPEED_10G = 0x0008, + FW_PORT_CAP_SPEED_40G = 0x0010, + FW_PORT_CAP_SPEED_100G = 0x0020, + FW_PORT_CAP_FC_RX = 0x0040, + FW_PORT_CAP_FC_TX = 0x0080, + FW_PORT_CAP_ANEG = 0x0100, + FW_PORT_CAP_MDI_0 = 0x0200, + FW_PORT_CAP_MDI_1 = 0x0400, + FW_PORT_CAP_BEAN = 0x0800, + FW_PORT_CAP_PMA_LPBK = 0x1000, + FW_PORT_CAP_PCS_LPBK = 0x2000, + FW_PORT_CAP_PHYXS_LPBK = 0x4000, + FW_PORT_CAP_FAR_END_LPBK = 0x8000, +}; + +enum fw_port_mdi { + FW_PORT_MDI_UNCHANGED, + FW_PORT_MDI_AUTO, + FW_PORT_MDI_F_STRAIGHT, + FW_PORT_MDI_F_CROSSOVER +}; + +#define FW_PORT_MDI(x) ((x) << 9) + +enum fw_port_action { + FW_PORT_ACTION_L1_CFG = 0x0001, + FW_PORT_ACTION_L2_CFG = 0x0002, + FW_PORT_ACTION_GET_PORT_INFO = 0x0003, + FW_PORT_ACTION_L2_PPP_CFG = 0x0004, + FW_PORT_ACTION_L2_DCB_CFG = 0x0005, + FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010, + FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011, + FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012, + FW_PORT_ACTION_LPBK_TO_NORMAL = 0x0020, + FW_PORT_ACTION_L1_LPBK = 0x0021, + 
FW_PORT_ACTION_L1_PMA_LPBK = 0x0022, + FW_PORT_ACTION_L1_PCS_LPBK = 0x0023, + FW_PORT_ACTION_L1_PHYXS_CSIDE_LPBK = 0x0024, + FW_PORT_ACTION_L1_PHYXS_ESIDE_LPBK = 0x0025, + FW_PORT_ACTION_PHY_RESET = 0x0040, + FW_PORT_ACTION_PMA_RESET = 0x0041, + FW_PORT_ACTION_PCS_RESET = 0x0042, + FW_PORT_ACTION_PHYXS_RESET = 0x0043, + FW_PORT_ACTION_DTEXS_REEST = 0x0044, + FW_PORT_ACTION_AN_RESET = 0x0045 +}; + +enum fw_port_l2cfg_ctlbf { + FW_PORT_L2_CTLBF_OVLAN0 = 0x01, + FW_PORT_L2_CTLBF_OVLAN1 = 0x02, + FW_PORT_L2_CTLBF_OVLAN2 = 0x04, + FW_PORT_L2_CTLBF_OVLAN3 = 0x08, + FW_PORT_L2_CTLBF_IVLAN = 0x10, + FW_PORT_L2_CTLBF_TXIPG = 0x20 +}; + +enum fw_port_dcb_cfg { + FW_PORT_DCB_CFG_PG = 0x01, + FW_PORT_DCB_CFG_PFC = 0x02, + FW_PORT_DCB_CFG_APPL = 0x04 +}; + +enum fw_port_dcb_cfg_rc { + FW_PORT_DCB_CFG_SUCCESS = 0x0, + FW_PORT_DCB_CFG_ERROR = 0x1 +}; + +struct fw_port_cmd { + __be32 op_to_portid; + __be32 action_to_len16; + union fw_port { + struct fw_port_l1cfg { + __be32 rcap; + __be32 r; + } l1cfg; + struct fw_port_l2cfg { + __be16 ctlbf_to_ivlan0; + __be16 ivlantype; + __be32 txipg_pkd; + __be16 ovlan0mask; + __be16 ovlan0type; + __be16 ovlan1mask; + __be16 ovlan1type; + __be16 ovlan2mask; + __be16 ovlan2type; + __be16 ovlan3mask; + __be16 ovlan3type; + } l2cfg; + struct fw_port_info { + __be32 lstatus_to_modtype; + __be16 pcap; + __be16 acap; + } info; + struct fw_port_ppp { + __be32 pppen_to_ncsich; + __be32 r11; + } ppp; + struct fw_port_dcb { + __be16 cfg; + u8 up_map; + u8 sf_cfgrc; + __be16 prot_ix; + u8 pe7_to_pe0; + u8 numTCPFCs; + __be32 pgid0_to_pgid7; + __be32 numTCs_oui; + u8 pgpc[8]; + } dcb; + } u; +}; + +#define FW_PORT_CMD_READ (1U << 22) + +#define FW_PORT_CMD_PORTID(x) ((x) << 0) +#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf) + +#define FW_PORT_CMD_ACTION(x) ((x) << 16) + +#define FW_PORT_CMD_CTLBF(x) ((x) << 10) +#define FW_PORT_CMD_OVLAN3(x) ((x) << 7) +#define FW_PORT_CMD_OVLAN2(x) ((x) << 6) +#define FW_PORT_CMD_OVLAN1(x) ((x) << 5) +#define FW_PORT_CMD_OVLAN0(x) ((x) << 4) +#define FW_PORT_CMD_IVLAN0(x) ((x) << 3) + +#define FW_PORT_CMD_TXIPG(x) ((x) << 19) + +#define FW_PORT_CMD_LSTATUS (1U << 31) +#define FW_PORT_CMD_LSPEED(x) ((x) << 24) +#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f) +#define FW_PORT_CMD_TXPAUSE (1U << 23) +#define FW_PORT_CMD_RXPAUSE (1U << 22) +#define FW_PORT_CMD_MDIOCAP (1U << 21) +#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f) +#define FW_PORT_CMD_LPTXPAUSE (1U << 15) +#define FW_PORT_CMD_LPRXPAUSE (1U << 14) +#define FW_PORT_CMD_PTYPE_MASK 0x1f +#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK) +#define FW_PORT_CMD_MODTYPE_MASK 0x1f +#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK) + +#define FW_PORT_CMD_PPPEN(x) ((x) << 31) +#define FW_PORT_CMD_TPSRC(x) ((x) << 28) +#define FW_PORT_CMD_NCSISRC(x) ((x) << 24) + +#define FW_PORT_CMD_CH0(x) ((x) << 20) +#define FW_PORT_CMD_CH1(x) ((x) << 16) +#define FW_PORT_CMD_CH2(x) ((x) << 12) +#define FW_PORT_CMD_CH3(x) ((x) << 8) +#define FW_PORT_CMD_NCSICH(x) ((x) << 4) + +enum fw_port_type { + FW_PORT_TYPE_FIBER, + FW_PORT_TYPE_KX4, + FW_PORT_TYPE_BT_SGMII, + FW_PORT_TYPE_KX, + FW_PORT_TYPE_BT_XAUI, + FW_PORT_TYPE_KR, + FW_PORT_TYPE_CX4, + FW_PORT_TYPE_TWINAX, + + FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK +}; + +enum fw_port_module_type { + FW_PORT_MOD_TYPE_NA, + FW_PORT_MOD_TYPE_LR, + FW_PORT_MOD_TYPE_SR, + FW_PORT_MOD_TYPE_ER, + + FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK +}; + +/* port stats */ +#define FW_NUM_PORT_STATS 50 
+#define FW_NUM_PORT_TX_STATS 23 +#define FW_NUM_PORT_RX_STATS 27 + +enum fw_port_stats_tx_index { + FW_STAT_TX_PORT_BYTES_IX, + FW_STAT_TX_PORT_FRAMES_IX, + FW_STAT_TX_PORT_BCAST_IX, + FW_STAT_TX_PORT_MCAST_IX, + FW_STAT_TX_PORT_UCAST_IX, + FW_STAT_TX_PORT_ERROR_IX, + FW_STAT_TX_PORT_64B_IX, + FW_STAT_TX_PORT_65B_127B_IX, + FW_STAT_TX_PORT_128B_255B_IX, + FW_STAT_TX_PORT_256B_511B_IX, + FW_STAT_TX_PORT_512B_1023B_IX, + FW_STAT_TX_PORT_1024B_1518B_IX, + FW_STAT_TX_PORT_1519B_MAX_IX, + FW_STAT_TX_PORT_DROP_IX, + FW_STAT_TX_PORT_PAUSE_IX, + FW_STAT_TX_PORT_PPP0_IX, + FW_STAT_TX_PORT_PPP1_IX, + FW_STAT_TX_PORT_PPP2_IX, + FW_STAT_TX_PORT_PPP3_IX, + FW_STAT_TX_PORT_PPP4_IX, + FW_STAT_TX_PORT_PPP5_IX, + FW_STAT_TX_PORT_PPP6_IX, + FW_STAT_TX_PORT_PPP7_IX +}; + +enum fw_port_stat_rx_index { + FW_STAT_RX_PORT_BYTES_IX, + FW_STAT_RX_PORT_FRAMES_IX, + FW_STAT_RX_PORT_BCAST_IX, + FW_STAT_RX_PORT_MCAST_IX, + FW_STAT_RX_PORT_UCAST_IX, + FW_STAT_RX_PORT_MTU_ERROR_IX, + FW_STAT_RX_PORT_MTU_CRC_ERROR_IX, + FW_STAT_RX_PORT_CRC_ERROR_IX, + FW_STAT_RX_PORT_LEN_ERROR_IX, + FW_STAT_RX_PORT_SYM_ERROR_IX, + FW_STAT_RX_PORT_64B_IX, + FW_STAT_RX_PORT_65B_127B_IX, + FW_STAT_RX_PORT_128B_255B_IX, + FW_STAT_RX_PORT_256B_511B_IX, + FW_STAT_RX_PORT_512B_1023B_IX, + FW_STAT_RX_PORT_1024B_1518B_IX, + FW_STAT_RX_PORT_1519B_MAX_IX, + FW_STAT_RX_PORT_PAUSE_IX, + FW_STAT_RX_PORT_PPP0_IX, + FW_STAT_RX_PORT_PPP1_IX, + FW_STAT_RX_PORT_PPP2_IX, + FW_STAT_RX_PORT_PPP3_IX, + FW_STAT_RX_PORT_PPP4_IX, + FW_STAT_RX_PORT_PPP5_IX, + FW_STAT_RX_PORT_PPP6_IX, + FW_STAT_RX_PORT_PPP7_IX, + FW_STAT_RX_PORT_LESS_64B_IX +}; + +struct fw_port_stats_cmd { + __be32 op_to_portid; + __be32 retval_len16; + union fw_port_stats { + struct fw_port_stats_ctl { + u8 nstats_bg_bm; + u8 tx_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_port_stats_all { + __be64 tx_bytes; + __be64 tx_frames; + __be64 tx_bcast; + __be64 tx_mcast; + __be64 tx_ucast; + __be64 tx_error; + __be64 tx_64b; + __be64 tx_65b_127b; + __be64 tx_128b_255b; + __be64 tx_256b_511b; + __be64 tx_512b_1023b; + __be64 tx_1024b_1518b; + __be64 tx_1519b_max; + __be64 tx_drop; + __be64 tx_pause; + __be64 tx_ppp0; + __be64 tx_ppp1; + __be64 tx_ppp2; + __be64 tx_ppp3; + __be64 tx_ppp4; + __be64 tx_ppp5; + __be64 tx_ppp6; + __be64 tx_ppp7; + __be64 rx_bytes; + __be64 rx_frames; + __be64 rx_bcast; + __be64 rx_mcast; + __be64 rx_ucast; + __be64 rx_mtu_error; + __be64 rx_mtu_crc_error; + __be64 rx_crc_error; + __be64 rx_len_error; + __be64 rx_sym_error; + __be64 rx_64b; + __be64 rx_65b_127b; + __be64 rx_128b_255b; + __be64 rx_256b_511b; + __be64 rx_512b_1023b; + __be64 rx_1024b_1518b; + __be64 rx_1519b_max; + __be64 rx_pause; + __be64 rx_ppp0; + __be64 rx_ppp1; + __be64 rx_ppp2; + __be64 rx_ppp3; + __be64 rx_ppp4; + __be64 rx_ppp5; + __be64 rx_ppp6; + __be64 rx_ppp7; + __be64 rx_less_64b; + __be64 rx_bg_drop; + __be64 rx_bg_trunc; + } all; + } u; +}; + +#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4) +#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0) +#define FW_PORT_STATS_CMD_TX(x) ((x) << 7) +#define FW_PORT_STATS_CMD_IX(x) ((x) << 0) + +/* port loopback stats */ +#define FW_NUM_LB_STATS 16 +enum fw_port_lb_stats_index { + FW_STAT_LB_PORT_BYTES_IX, + FW_STAT_LB_PORT_FRAMES_IX, + FW_STAT_LB_PORT_BCAST_IX, + FW_STAT_LB_PORT_MCAST_IX, + FW_STAT_LB_PORT_UCAST_IX, + FW_STAT_LB_PORT_ERROR_IX, + FW_STAT_LB_PORT_64B_IX, + FW_STAT_LB_PORT_65B_127B_IX, + FW_STAT_LB_PORT_128B_255B_IX, + FW_STAT_LB_PORT_256B_511B_IX, + 
FW_STAT_LB_PORT_512B_1023B_IX, + FW_STAT_LB_PORT_1024B_1518B_IX, + FW_STAT_LB_PORT_1519B_MAX_IX, + FW_STAT_LB_PORT_DROP_FRAMES_IX +}; + +struct fw_port_lb_stats_cmd { + __be32 op_to_lbport; + __be32 retval_len16; + union fw_port_lb_stats { + struct fw_port_lb_stats_ctl { + u8 nstats_bg_bm; + u8 ix_pkd; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_port_lb_stats_all { + __be64 tx_bytes; + __be64 tx_frames; + __be64 tx_bcast; + __be64 tx_mcast; + __be64 tx_ucast; + __be64 tx_error; + __be64 tx_64b; + __be64 tx_65b_127b; + __be64 tx_128b_255b; + __be64 tx_256b_511b; + __be64 tx_512b_1023b; + __be64 tx_1024b_1518b; + __be64 tx_1519b_max; + __be64 rx_lb_drop; + __be64 rx_lb_trunc; + } all; + } u; +}; + +#define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0) +#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4) +#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0) +#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0) + +struct fw_rss_ind_tbl_cmd { + __be32 op_to_viid; +#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0) + __be32 retval_len16; + __be16 niqid; + __be16 startidx; + __be32 r3; + __be32 iq0_to_iq2; +#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20) +#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10) +#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0) + __be32 iq3_to_iq5; + __be32 iq6_to_iq8; + __be32 iq9_to_iq11; + __be32 iq12_to_iq14; + __be32 iq15_to_iq17; + __be32 iq18_to_iq20; + __be32 iq21_to_iq23; + __be32 iq24_to_iq26; + __be32 iq27_to_iq29; + __be32 iq30_iq31; + __be32 r15_lo; +}; + +struct fw_rss_glb_config_cmd { + __be32 op_to_write; + __be32 retval_len16; + union fw_rss_glb_config { + struct fw_rss_glb_config_manual { + __be32 mode_pkd; + __be32 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_glb_config_basicvirtual { + __be32 mode_pkd; + __be32 synmapen_to_hashtoeplitz; +#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN (1U << 8) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6) +#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5) +#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4) +#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN (1U << 3) +#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN (1U << 2) +#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP (1U << 1) +#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ (1U << 0) + __be64 r8; + __be64 r9; + } basicvirtual; + } u; +}; + +#define FW_RSS_GLB_CONFIG_CMD_MODE(x) ((x) << 28) + +#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL 0 +#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1 + +struct fw_rss_vi_config_cmd { + __be32 op_to_viid; +#define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0) + __be32 retval_len16; + union fw_rss_vi_config { + struct fw_rss_vi_config_manual { + __be64 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_vi_config_basicvirtual { + __be32 r6; + __be32 defaultq_to_ip4udpen; +#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) ((x) << 16) +#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4) +#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN (1U << 3) +#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2) +#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN (1U << 1) +#define FW_RSS_VI_CONFIG_CMD_IP4UDPEN (1U << 0) + __be64 r9; + __be64 r10; + } basicvirtual; + } u; +}; + +enum fw_error_type { + FW_ERROR_TYPE_EXCEPTION = 0x0, + FW_ERROR_TYPE_HWMODULE = 0x1, + FW_ERROR_TYPE_WR = 0x2, + FW_ERROR_TYPE_ACL = 0x3, +}; + +struct fw_error_cmd { + __be32 op_to_type; + __be32 len16_pkd; + union fw_error { + struct fw_error_exception { + __be32 info[6]; + } exception; 
+ struct fw_error_hwmodule { + __be32 regaddr; + __be32 regval; + } hwmodule; + struct fw_error_wr { + __be16 cidx; + __be16 pfn_vfn; + __be32 eqid; + u8 wrhdr[16]; + } wr; + struct fw_error_acl { + __be16 cidx; + __be16 pfn_vfn; + __be32 eqid; + __be16 mv_pkd; + u8 val[6]; + __be64 r4; + } acl; + } u; +}; + +struct fw_debug_cmd { + __be32 op_type; +#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff) + __be32 len16_pkd; + union fw_debug { + struct fw_debug_assert { + __be32 fcid; + __be32 line; + __be32 x; + __be32 y; + u8 filename_0_7[8]; + u8 filename_8_15[8]; + __be64 r3; + } assert; + struct fw_debug_prt { + __be16 dprtstridx; + __be16 r3[3]; + __be32 dprtstrparam0; + __be32 dprtstrparam1; + __be32 dprtstrparam2; + __be32 dprtstrparam3; + } prt; + } u; +}; + +struct fw_hdr { + u8 ver; + u8 reserved1; + __be16 len512; /* bin length in units of 512-bytes */ + __be32 fw_ver; /* firmware version */ + __be32 tp_microcode_ver; + u8 intfver_nic; + u8 intfver_vnic; + u8 intfver_ofld; + u8 intfver_ri; + u8 intfver_iscsipdu; + u8 intfver_iscsi; + u8 intfver_fcoe; + u8 reserved2; + __be32 reserved3[27]; +}; + +#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) +#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff) +#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) +#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) +#endif /* _T4FW_INTERFACE_H_ */ -- cgit v1.2.3 From 56d36be4dd5fc7b33bff7986737aff79c790184a Mon Sep 17 00:00:00 2001 From: Dimitris Michailidis Date: Thu, 1 Apr 2010 15:28:23 +0000 Subject: cxgb4: Add HW and FW support code Signed-off-by: Dimitris Michailidis Signed-off-by: David S. Miller --- drivers/net/cxgb4/t4_hw.c | 3131 +++++++++++++++++++++++++++++++++++++++++++++ drivers/net/cxgb4/t4_hw.h | 100 ++ 2 files changed, 3231 insertions(+) create mode 100644 drivers/net/cxgb4/t4_hw.c create mode 100644 drivers/net/cxgb4/t4_hw.h diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c new file mode 100644 index 000000000000..a814a3afe123 --- /dev/null +++ b/drivers/net/cxgb4/t4_hw.c @@ -0,0 +1,3131 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4fw_api.h" + +/** + * t4_wait_op_done_val - wait until an operation is completed + * @adapter: the adapter performing the operation + * @reg: the register to check for completion + * @mask: a single-bit field within @reg that indicates completion + * @polarity: the value of the field when the operation is completed + * @attempts: number of check iterations + * @delay: delay in usecs between iterations + * @valp: where to store the value of the register at completion time + * + * Wait until an operation is completed by checking a bit in a register + * up to @attempts times. If @valp is not NULL the value of the register + * at the time it indicated completion is stored there. Returns 0 if the + * operation completes and -EAGAIN otherwise. + */ +int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay, u32 *valp) +{ + while (1) { + u32 val = t4_read_reg(adapter, reg); + + if (!!(val & mask) == polarity) { + if (valp) + *valp = val; + return 0; + } + if (--attempts == 0) + return -EAGAIN; + if (delay) + udelay(delay); + } +} + +static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay) +{ + return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, + delay, NULL); +} + +/** + * t4_set_reg_field - set a register field to a value + * @adapter: the adapter to program + * @addr: the register address + * @mask: specifies the portion of the register to modify + * @val: the new value for the register field + * + * Sets a register field specified by the supplied mask to the + * given value. + */ +void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, + u32 val) +{ + u32 v = t4_read_reg(adapter, addr) & ~mask; + + t4_write_reg(adapter, addr, v | val); + (void) t4_read_reg(adapter, addr); /* flush */ +} + +/** + * t4_read_indirect - read indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect address + * @data_reg: register holding the value of the indirect register + * @vals: where the read register values are stored + * @nregs: how many indirect registers to read + * @start_idx: index of first indirect register to read + * + * Reads registers that are accessed indirectly through an address/data + * register pair. + */ +void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, u32 *vals, unsigned int nregs, + unsigned int start_idx) +{ + while (nregs--) { + t4_write_reg(adap, addr_reg, start_idx); + *vals++ = t4_read_reg(adap, data_reg); + start_idx++; + } +} + +/** + * t4_write_indirect - write indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect addresses + * @data_reg: register holding the value for the indirect registers + * @vals: values to write + * @nregs: how many indirect registers to write + * @start_idx: address of first indirect register to write + * + * Writes a sequential block of registers that are accessed indirectly + * through an address/data register pair. + */ +void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, const u32 *vals, + unsigned int nregs, unsigned int start_idx) +{ + while (nregs--) { + t4_write_reg(adap, addr_reg, start_idx++); + t4_write_reg(adap, data_reg, *vals++); + } +} + +/* + * Get the reply to a mailbox command and store it in @rpl in big-endian order. 
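+ *
+ * The reply is fetched as @nflit 64-bit words (flits) starting at
+ * @mbox_addr, one register read per flit.  For illustration, the caller
+ * below retrieves a @size-byte reply with:
+ *
+ *     get_mbox_rpl(adap, rpl, size / 8, data_reg);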
+ */ +static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, + u32 mbox_addr) +{ + for ( ; nflit; nflit--, mbox_addr += 8) + *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); +} + +/* + * Handle a FW assertion reported in a mailbox. + */ +static void fw_asrt(struct adapter *adap, u32 mbox_addr) +{ + struct fw_debug_cmd asrt; + + get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr); + dev_alert(adap->pdev_dev, + "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", + asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line), + ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y)); +} + +static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg) +{ + dev_err(adap->pdev_dev, + "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox, + (unsigned long long)t4_read_reg64(adap, data_reg), + (unsigned long long)t4_read_reg64(adap, data_reg + 8), + (unsigned long long)t4_read_reg64(adap, data_reg + 16), + (unsigned long long)t4_read_reg64(adap, data_reg + 24), + (unsigned long long)t4_read_reg64(adap, data_reg + 32), + (unsigned long long)t4_read_reg64(adap, data_reg + 40), + (unsigned long long)t4_read_reg64(adap, data_reg + 48), + (unsigned long long)t4_read_reg64(adap, data_reg + 56)); +} + +/** + * t4_wr_mbox_meat - send a command to FW through the given mailbox + * @adap: the adapter + * @mbox: index of the mailbox to use + * @cmd: the command to write + * @size: command length in bytes + * @rpl: where to optionally store the reply + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Sends the given command to FW through the selected mailbox and waits + * for the FW to execute the command. If @rpl is not %NULL it is used to + * store the FW's reply to the command. The command and its optional + * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms + * to respond. @sleep_ok determines whether we may sleep while awaiting + * the response. If sleeping is allowed we use progressive backoff + * otherwise we spin. + * + * The return value is 0 on success or a negative errno on failure. A + * failure can happen either because we are not able to execute the + * command or FW executes it but signals an error. In the latter case + * the return value is the error code indicated by FW (negated). + */ +int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, + void *rpl, bool sleep_ok) +{ + static int delay[] = { + 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 + }; + + u32 v; + u64 res; + int i, ms, delay_idx; + const __be64 *p = cmd; + u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA); + u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL); + + if ((size & 15) || size > MBOX_LEN) + return -EINVAL; + + v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); + for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) + v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); + + if (v != MBOX_OWNER_DRV) + return v ? 
-EBUSY : -ETIMEDOUT; + + for (i = 0; i < size; i += 8) + t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); + + t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); + t4_read_reg(adap, ctl_reg); /* flush write */ + + delay_idx = 0; + ms = delay[0]; + + for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else + mdelay(ms); + + v = t4_read_reg(adap, ctl_reg); + if (MBOWNER_GET(v) == MBOX_OWNER_DRV) { + if (!(v & MBMSGVALID)) { + t4_write_reg(adap, ctl_reg, 0); + continue; + } + + res = t4_read_reg64(adap, data_reg); + if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) { + fw_asrt(adap, data_reg); + res = FW_CMD_RETVAL(EIO); + } else if (rpl) + get_mbox_rpl(adap, rpl, size / 8, data_reg); + + if (FW_CMD_RETVAL_GET((int)res)) + dump_mbox(adap, mbox, data_reg); + t4_write_reg(adap, ctl_reg, 0); + return -FW_CMD_RETVAL_GET((int)res); + } + } + + dump_mbox(adap, mbox, data_reg); + dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", + *(const u8 *)cmd, mbox); + return -ETIMEDOUT; +} + +/** + * t4_mc_read - read from MC through backdoor accesses + * @adap: the adapter + * @addr: address of first byte requested + * @data: 64 bytes of data containing the requested address + * @ecc: where to store the corresponding 64-bit ECC word + * + * Read 64 bytes of data from MC starting at a 64-byte-aligned address + * that covers the requested address @addr. If @parity is not %NULL it + * is assigned the 64-bit ECC word for the read data. + */ +int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc) +{ + int i; + + if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST) + return -EBUSY; + t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU); + t4_write_reg(adap, MC_BIST_CMD_LEN, 64); + t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc); + t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST | + BIST_CMD_GAP(1)); + i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1); + if (i) + return i; + +#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) + + for (i = 15; i >= 0; i--) + *data++ = htonl(t4_read_reg(adap, MC_DATA(i))); + if (ecc) + *ecc = t4_read_reg64(adap, MC_DATA(16)); +#undef MC_DATA + return 0; +} + +/** + * t4_edc_read - read from EDC through backdoor accesses + * @adap: the adapter + * @idx: which EDC to access + * @addr: address of first byte requested + * @data: 64 bytes of data containing the requested address + * @ecc: where to store the corresponding 64-bit ECC word + * + * Read 64 bytes of data from EDC starting at a 64-byte-aligned address + * that covers the requested address @addr. If @parity is not %NULL it + * is assigned the 64-bit ECC word for the read data. 
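+ *
+ * A minimal usage sketch (buffer names are illustrative only), reading
+ * 64 bytes plus ECC from EDC0 at a 64-byte-aligned address:
+ *
+ *     __be32 buf[16];
+ *     u64 ecc;
+ *     int err = t4_edc_read(adap, 0, addr & ~0x3fU, buf, &ecc);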
+ */ +int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) +{ + int i; + + idx *= EDC_STRIDE; + if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST) + return -EBUSY; + t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU); + t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64); + t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc); + t4_write_reg(adap, EDC_BIST_CMD + idx, + BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST); + i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1); + if (i) + return i; + +#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) + + for (i = 15; i >= 0; i--) + *data++ = htonl(t4_read_reg(adap, EDC_DATA(i))); + if (ecc) + *ecc = t4_read_reg64(adap, EDC_DATA(16)); +#undef EDC_DATA + return 0; +} + +#define VPD_ENTRY(name, len) \ + u8 name##_kword[2]; u8 name##_len; u8 name##_data[len] + +/* + * Partial EEPROM Vital Product Data structure. Includes only the ID and + * VPD-R sections. + */ +struct t4_vpd { + u8 id_tag; + u8 id_len[2]; + u8 id_data[ID_LEN]; + u8 vpdr_tag; + u8 vpdr_len[2]; + VPD_ENTRY(pn, 16); /* part number */ + VPD_ENTRY(ec, EC_LEN); /* EC level */ + VPD_ENTRY(sn, SERNUM_LEN); /* serial number */ + VPD_ENTRY(na, 12); /* MAC address base */ + VPD_ENTRY(port_type, 8); /* port types */ + VPD_ENTRY(gpio, 14); /* GPIO usage */ + VPD_ENTRY(cclk, 6); /* core clock */ + VPD_ENTRY(port_addr, 8); /* port MDIO addresses */ + VPD_ENTRY(rv, 1); /* csum */ + u32 pad; /* for multiple-of-4 sizing and alignment */ +}; + +#define EEPROM_STAT_ADDR 0x7bfc +#define VPD_BASE 0 + +/** + * t4_seeprom_wp - enable/disable EEPROM write protection + * @adapter: the adapter + * @enable: whether to enable or disable write protection + * + * Enables or disables write protection on the serial EEPROM. + */ +int t4_seeprom_wp(struct adapter *adapter, bool enable) +{ + unsigned int v = enable ? 0xc : 0; + int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v); + return ret < 0 ? ret : 0; +} + +/** + * get_vpd_params - read VPD parameters from VPD EEPROM + * @adapter: adapter to read + * @p: where to store the parameters + * + * Reads card parameters stored in VPD EEPROM. 
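+ *
+ * The ID and VPD-R sections are protected by the RV keyword: all bytes
+ * from the start of the structure up to and including rv_data must sum
+ * to zero (mod 256) or the EEPROM is treated as corrupted.  The core
+ * clock, board ID, EC level, and serial number are then copied out of
+ * the VPD keywords.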
+ */ +static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) +{ + int ret; + struct t4_vpd vpd; + u8 *q = (u8 *)&vpd, csum; + + ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd); + if (ret < 0) + return ret; + + for (csum = 0; q <= vpd.rv_data; q++) + csum += *q; + + if (csum) { + dev_err(adapter->pdev_dev, + "corrupted VPD EEPROM, actual csum %u\n", csum); + return -EINVAL; + } + + p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10); + memcpy(p->id, vpd.id_data, sizeof(vpd.id_data)); + strim(p->id); + memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data)); + strim(p->ec); + memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data)); + strim(p->sn); + return 0; +} + +/* serial flash and firmware constants */ +enum { + SF_ATTEMPTS = 10, /* max retries for SF operations */ + + /* flash command opcodes */ + SF_PROG_PAGE = 2, /* program page */ + SF_WR_DISABLE = 4, /* disable writes */ + SF_RD_STATUS = 5, /* read status register */ + SF_WR_ENABLE = 6, /* enable writes */ + SF_RD_DATA_FAST = 0xb, /* read flash */ + SF_ERASE_SECTOR = 0xd8, /* erase sector */ + + FW_START_SEC = 8, /* first flash sector for FW */ + FW_END_SEC = 15, /* last flash sector for FW */ + FW_IMG_START = FW_START_SEC * SF_SEC_SIZE, + FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE, +}; + +/** + * sf1_read - read data from the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to read + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @valp: where to store the read data + * + * Reads up to 4 bytes of data from the serial flash. The location of + * the read needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, + int lock, u32 *valp) +{ + int ret; + + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t4_read_reg(adapter, SF_OP) & BUSY) + return -EBUSY; + cont = cont ? SF_CONT : 0; + lock = lock ? SF_LOCK : 0; + t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1)); + ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5); + if (!ret) + *valp = t4_read_reg(adapter, SF_DATA); + return ret; +} + +/** + * sf1_write - write data to the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to write + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @val: value to write + * + * Writes up to 4 bytes of data to the serial flash. The location of + * the write needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, + int lock, u32 val) +{ + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t4_read_reg(adapter, SF_OP) & BUSY) + return -EBUSY; + cont = cont ? SF_CONT : 0; + lock = lock ? SF_LOCK : 0; + t4_write_reg(adapter, SF_DATA, val); + t4_write_reg(adapter, SF_OP, lock | + cont | BYTECNT(byte_cnt - 1) | OP_WR); + return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5); +} + +/** + * flash_wait_op - wait for a flash operation to complete + * @adapter: the adapter + * @attempts: max number of polls of the status register + * @delay: delay between polls in ms + * + * Wait for a flash operation to complete by polling the status register. 
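+ *
+ * Completion is detected by issuing SF_RD_STATUS and testing bit 0
+ * (busy) of the returned status byte; the wait gives up with -EAGAIN
+ * after @attempts polls.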
+ */ +static int flash_wait_op(struct adapter *adapter, int attempts, int delay) +{ + int ret; + u32 status; + + while (1) { + if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 || + (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0) + return ret; + if (!(status & 1)) + return 0; + if (--attempts == 0) + return -EAGAIN; + if (delay) + msleep(delay); + } +} + +/** + * t4_read_flash - read words from serial flash + * @adapter: the adapter + * @addr: the start address for the read + * @nwords: how many 32-bit words to read + * @data: where to store the read data + * @byte_oriented: whether to store data as bytes or as words + * + * Read the specified number of 32-bit words from the serial flash. + * If @byte_oriented is set the read data is stored as a byte array + * (i.e., big-endian), otherwise as 32-bit words in the platform's + * natural endianess. + */ +int t4_read_flash(struct adapter *adapter, unsigned int addr, + unsigned int nwords, u32 *data, int byte_oriented) +{ + int ret; + + if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3)) + return -EINVAL; + + addr = swab32(addr) | SF_RD_DATA_FAST; + + if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 || + (ret = sf1_read(adapter, 1, 1, 0, data)) != 0) + return ret; + + for ( ; nwords; nwords--, data++) { + ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); + if (nwords == 1) + t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ + if (ret) + return ret; + if (byte_oriented) + *data = htonl(*data); + } + return 0; +} + +/** + * t4_write_flash - write up to a page of data to the serial flash + * @adapter: the adapter + * @addr: the start address to write + * @n: length of data to write in bytes + * @data: the data to write + * + * Writes up to a page of data (256 bytes) to the serial flash starting + * at the given address. All the data must be written to the same page. + */ +static int t4_write_flash(struct adapter *adapter, unsigned int addr, + unsigned int n, const u8 *data) +{ + int ret; + u32 buf[64]; + unsigned int i, c, left, val, offset = addr & 0xff; + + if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE) + return -EINVAL; + + val = swab32(addr) | SF_PROG_PAGE; + + if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || + (ret = sf1_write(adapter, 4, 1, 1, val)) != 0) + goto unlock; + + for (left = n; left; left -= c) { + c = min(left, 4U); + for (val = 0, i = 0; i < c; ++i) + val = (val << 8) + *data++; + + ret = sf1_write(adapter, c, c != left, 1, val); + if (ret) + goto unlock; + } + ret = flash_wait_op(adapter, 5, 1); + if (ret) + goto unlock; + + t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ + + /* Read the page to verify the write succeeded */ + ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); + if (ret) + return ret; + + if (memcmp(data - n, (u8 *)buf + offset, n)) { + dev_err(adapter->pdev_dev, + "failed to correctly write the flash page at %#x\n", + addr); + return -EIO; + } + return 0; + +unlock: + t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ + return ret; +} + +/** + * get_fw_version - read the firmware version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the FW version from flash. + */ +static int get_fw_version(struct adapter *adapter, u32 *vers) +{ + return t4_read_flash(adapter, + FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1, + vers, 0); +} + +/** + * get_tp_version - read the TP microcode version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the TP microcode version from flash. 
+ */ +static int get_tp_version(struct adapter *adapter, u32 *vers) +{ + return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr, + tp_microcode_ver), + 1, vers, 0); +} + +/** + * t4_check_fw_version - check if the FW is compatible with this driver + * @adapter: the adapter + * + * Checks if an adapter's FW is compatible with the driver. Returns 0 + * if there's exact match, a negative error if the version could not be + * read or there's a major version mismatch, and a positive value if the + * expected major version is found but there's a minor version mismatch. + */ +int t4_check_fw_version(struct adapter *adapter) +{ + u32 api_vers[2]; + int ret, major, minor, micro; + + ret = get_fw_version(adapter, &adapter->params.fw_vers); + if (!ret) + ret = get_tp_version(adapter, &adapter->params.tp_vers); + if (!ret) + ret = t4_read_flash(adapter, + FW_IMG_START + offsetof(struct fw_hdr, intfver_nic), + 2, api_vers, 1); + if (ret) + return ret; + + major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); + minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); + micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); + memcpy(adapter->params.api_vers, api_vers, + sizeof(adapter->params.api_vers)); + + if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ + dev_err(adapter->pdev_dev, + "card FW has major version %u, driver wants %u\n", + major, FW_VERSION_MAJOR); + return -EINVAL; + } + + if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) + return 0; /* perfect match */ + + /* Minor/micro version mismatch. Report it but often it's OK. */ + return 1; +} + +/** + * t4_flash_erase_sectors - erase a range of flash sectors + * @adapter: the adapter + * @start: the first sector to erase + * @end: the last sector to erase + * + * Erases the sectors in the given inclusive range. + */ +static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) +{ + int ret = 0; + + while (start <= end) { + if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || + (ret = sf1_write(adapter, 4, 0, 1, + SF_ERASE_SECTOR | (start << 8))) != 0 || + (ret = flash_wait_op(adapter, 5, 500)) != 0) { + dev_err(adapter->pdev_dev, + "erase of flash sector %d failed, error %d\n", + start, ret); + break; + } + start++; + } + t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ + return ret; +} + +/** + * t4_load_fw - download firmware + * @adap: the adapter + * @fw_data: the firmware image to write + * @size: image size + * + * Write the supplied firmware image to the card's serial flash. 
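+ *
+ * The image is validated first: it must be a non-empty multiple of 512
+ * bytes, match the length recorded in its fw_hdr, fit within
+ * FW_MAX_SIZE, and the 32-bit sum of all of its big-endian words must
+ * equal 0xffffffff.  The first page is initially written with an
+ * invalid version and the real fw_ver is written last, so an
+ * interrupted download leaves a header that fails the version check
+ * rather than a plausible-looking stale image.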
+ */ +int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) +{ + u32 csum; + int ret, addr; + unsigned int i; + u8 first_page[SF_PAGE_SIZE]; + const u32 *p = (const u32 *)fw_data; + const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; + + if (!size) { + dev_err(adap->pdev_dev, "FW image has no data\n"); + return -EINVAL; + } + if (size & 511) { + dev_err(adap->pdev_dev, + "FW image size not multiple of 512 bytes\n"); + return -EINVAL; + } + if (ntohs(hdr->len512) * 512 != size) { + dev_err(adap->pdev_dev, + "FW image size differs from size in FW header\n"); + return -EINVAL; + } + if (size > FW_MAX_SIZE) { + dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", + FW_MAX_SIZE); + return -EFBIG; + } + + for (csum = 0, i = 0; i < size / sizeof(csum); i++) + csum += ntohl(p[i]); + + if (csum != 0xffffffff) { + dev_err(adap->pdev_dev, + "corrupted firmware image, checksum %#x\n", csum); + return -EINVAL; + } + + i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */ + ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1); + if (ret) + goto out; + + /* + * We write the correct version at the end so the driver can see a bad + * version if the FW write fails. Start by writing a copy of the + * first page with a bad version. + */ + memcpy(first_page, fw_data, SF_PAGE_SIZE); + ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff); + ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page); + if (ret) + goto out; + + addr = FW_IMG_START; + for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { + addr += SF_PAGE_SIZE; + fw_data += SF_PAGE_SIZE; + ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data); + if (ret) + goto out; + } + + ret = t4_write_flash(adap, + FW_IMG_START + offsetof(struct fw_hdr, fw_ver), + sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); +out: + if (ret) + dev_err(adap->pdev_dev, "firmware download failed, error %d\n", + ret); + return ret; +} + +#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ + FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG) + +/** + * t4_link_start - apply link configuration to MAC/PHY + * @phy: the PHY to setup + * @mac: the MAC to setup + * @lc: the requested link configuration + * + * Set up a port's MAC and PHY according to a desired link configuration. + * - If the PHY can auto-negotiate first decide what to advertise, then + * enable/disable auto-negotiation as desired, and reset. + * - If the PHY does not auto-negotiate just reset it. + * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, + * otherwise do it later based on the outcome of auto-negotiation. 
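+ *
+ * A typical caller fills in a struct link_config and then issues, for
+ * example (the port index shown is purely illustrative):
+ *
+ *     lc->requested_fc = PAUSE_RX | PAUSE_TX;
+ *     lc->autoneg = AUTONEG_ENABLE;
+ *     err = t4_link_start(adap, mbox, port_id, lc);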
+ */ +int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, + struct link_config *lc) +{ + struct fw_port_cmd c; + unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO); + + lc->link_ok = 0; + if (lc->requested_fc & PAUSE_RX) + fc |= FW_PORT_CAP_FC_RX; + if (lc->requested_fc & PAUSE_TX) + fc |= FW_PORT_CAP_FC_TX; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); + c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | + FW_LEN16(c)); + + if (!(lc->supported & FW_PORT_CAP_ANEG)) { + c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc); + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + } else if (lc->autoneg == AUTONEG_DISABLE) { + c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi); + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + } else + c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi); + + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_restart_aneg - restart autonegotiation + * @adap: the adapter + * @mbox: mbox to use for the FW command + * @port: the port id + * + * Restarts autonegotiation for the selected port. + */ +int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) +{ + struct fw_port_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_PORT_CMD_PORTID(port)); + c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) | + FW_LEN16(c)); + c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_set_vlan_accel - configure HW VLAN extraction + * @adap: the adapter + * @ports: bitmap of adapter ports to operate on + * @on: enable (1) or disable (0) HW VLAN extraction + * + * Enables or disables HW extraction of VLAN tags for the ports specified + * by @ports. @ports is a bitmap with the ith bit designating the port + * associated with the ith adapter channel. + */ +void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on) +{ + ports <<= VLANEXTENABLE_SHIFT; + t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0); +} + +struct intr_info { + unsigned int mask; /* bits to check in interrupt status */ + const char *msg; /* message to print or NULL */ + short stat_idx; /* stat counter to increment or -1 */ + unsigned short fatal; /* whether the condition reported is fatal */ +}; + +/** + * t4_handle_intr_status - table driven interrupt handler + * @adapter: the adapter that generated the interrupt + * @reg: the interrupt status register to process + * @acts: table of interrupt actions + * + * A table driven interrupt handler that applies a set of masks to an + * interrupt status word and performs the corresponding actions if the + * interrupts described by the mask have occured. The actions include + * optionally emitting a warning or alert message. The table is terminated + * by an entry specifying mask 0. Returns the number of fatal interrupt + * conditions. 
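+ *
+ * Each entry names the status bit(s) to test, a message, a statistics
+ * index (or -1), and whether the condition is fatal.  A table has the
+ * following shape (the bit name here is purely illustrative):
+ *
+ *     static struct intr_info example_intr_info[] = {
+ *             { SOME_PARITY_ERR, "module parity error", -1, 1 },
+ *             { 0 }
+ *     };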
+ */ +static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg, + const struct intr_info *acts) +{ + int fatal = 0; + unsigned int mask = 0; + unsigned int status = t4_read_reg(adapter, reg); + + for ( ; acts->mask; ++acts) { + if (!(status & acts->mask)) + continue; + if (acts->fatal) { + fatal++; + dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, + status & acts->mask); + } else if (acts->msg && printk_ratelimit()) + dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg, + status & acts->mask); + mask |= acts->mask; + } + status &= mask; + if (status) /* clear processed interrupts */ + t4_write_reg(adapter, reg, status); + return fatal; +} + +/* + * Interrupt handler for the PCIE module. + */ +static void pcie_intr_handler(struct adapter *adapter) +{ + static struct intr_info sysbus_intr_info[] = { + { RNPP, "RXNP array parity error", -1, 1 }, + { RPCP, "RXPC array parity error", -1, 1 }, + { RCIP, "RXCIF array parity error", -1, 1 }, + { RCCP, "Rx completions control array parity error", -1, 1 }, + { RFTP, "RXFT array parity error", -1, 1 }, + { 0 } + }; + static struct intr_info pcie_port_intr_info[] = { + { TPCP, "TXPC array parity error", -1, 1 }, + { TNPP, "TXNP array parity error", -1, 1 }, + { TFTP, "TXFT array parity error", -1, 1 }, + { TCAP, "TXCA array parity error", -1, 1 }, + { TCIP, "TXCIF array parity error", -1, 1 }, + { RCAP, "RXCA array parity error", -1, 1 }, + { OTDD, "outbound request TLP discarded", -1, 1 }, + { RDPE, "Rx data parity error", -1, 1 }, + { TDUE, "Tx uncorrectable data error", -1, 1 }, + { 0 } + }; + static struct intr_info pcie_intr_info[] = { + { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, + { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, + { MSIDATAPERR, "MSI data parity error", -1, 1 }, + { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 }, + { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 }, + { MSIXDATAPERR, "MSI-X data parity error", -1, 1 }, + { MSIXDIPERR, "MSI-X DI parity error", -1, 1 }, + { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 }, + { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 }, + { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 }, + { CCNTPERR, "PCI CMD channel count parity error", -1, 1 }, + { CREQPERR, "PCI CMD channel request parity error", -1, 1 }, + { CRSPPERR, "PCI CMD channel response parity error", -1, 1 }, + { DCNTPERR, "PCI DMA channel count parity error", -1, 1 }, + { DREQPERR, "PCI DMA channel request parity error", -1, 1 }, + { DRSPPERR, "PCI DMA channel response parity error", -1, 1 }, + { HCNTPERR, "PCI HMA channel count parity error", -1, 1 }, + { HREQPERR, "PCI HMA channel request parity error", -1, 1 }, + { HRSPPERR, "PCI HMA channel response parity error", -1, 1 }, + { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 }, + { FIDPERR, "PCI FID parity error", -1, 1 }, + { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 }, + { MATAGPERR, "PCI MA tag parity error", -1, 1 }, + { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 }, + { RXCPLPERR, "PCI Rx completion parity error", -1, 1 }, + { RXWRPERR, "PCI Rx write parity error", -1, 1 }, + { RPLPERR, "PCI replay buffer parity error", -1, 1 }, + { PCIESINT, "PCI core secondary fault", -1, 1 }, + { PCIEPINT, "PCI core primary fault", -1, 1 }, + { UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 }, + { 0 } + }; + + int fat; + + fat = t4_handle_intr_status(adapter, + PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, + sysbus_intr_info) + + t4_handle_intr_status(adapter, + 
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, + pcie_port_intr_info) + + t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info); + if (fat) + t4_fatal_err(adapter); +} + +/* + * TP interrupt handler. + */ +static void tp_intr_handler(struct adapter *adapter) +{ + static struct intr_info tp_intr_info[] = { + { 0x3fffffff, "TP parity error", -1, 1 }, + { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info)) + t4_fatal_err(adapter); +} + +/* + * SGE interrupt handler. + */ +static void sge_intr_handler(struct adapter *adapter) +{ + u64 v; + + static struct intr_info sge_intr_info[] = { + { ERR_CPL_EXCEED_IQE_SIZE, + "SGE received CPL exceeding IQE size", -1, 1 }, + { ERR_INVALID_CIDX_INC, + "SGE GTS CIDX increment too large", -1, 0 }, + { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, + { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 }, + { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, + "SGE IQID > 1023 received CPL for FL", -1, 0 }, + { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1, + 0 }, + { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1, + 0 }, + { ERR_ING_CTXT_PRIO, + "SGE too many priority ingress contexts", -1, 0 }, + { ERR_EGR_CTXT_PRIO, + "SGE too many priority egress contexts", -1, 0 }, + { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 }, + { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 }, + { 0 } + }; + + v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) | + ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32); + if (v) { + dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n", + (unsigned long long)v); + t4_write_reg(adapter, SGE_INT_CAUSE1, v); + t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32); + } + + if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) || + v != 0) + t4_fatal_err(adapter); +} + +/* + * CIM interrupt handler. 
+ */ +static void cim_intr_handler(struct adapter *adapter) +{ + static struct intr_info cim_intr_info[] = { + { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, + { OBQPARERR, "CIM OBQ parity error", -1, 1 }, + { IBQPARERR, "CIM IBQ parity error", -1, 1 }, + { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 }, + { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 }, + { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 }, + { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, + { 0 } + }; + static struct intr_info cim_upintr_info[] = { + { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, + { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, + { ILLWRINT, "CIM illegal write", -1, 1 }, + { ILLRDINT, "CIM illegal read", -1, 1 }, + { ILLRDBEINT, "CIM illegal read BE", -1, 1 }, + { ILLWRBEINT, "CIM illegal write BE", -1, 1 }, + { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 }, + { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 }, + { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 }, + { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 }, + { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 }, + { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 }, + { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 }, + { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 }, + { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 }, + { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 }, + { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 }, + { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 }, + { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 }, + { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 }, + { SGLRDPLINT , "CIM single read from PL space", -1, 1 }, + { SGLWRPLINT , "CIM single write to PL space", -1, 1 }, + { BLKRDPLINT , "CIM block read from PL space", -1, 1 }, + { BLKWRPLINT , "CIM block write to PL space", -1, 1 }, + { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 }, + { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 }, + { TIMEOUTINT , "CIM PIF timeout", -1, 1 }, + { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 }, + { 0 } + }; + + int fat; + + fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, + cim_intr_info) + + t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, + cim_upintr_info); + if (fat) + t4_fatal_err(adapter); +} + +/* + * ULP RX interrupt handler. + */ +static void ulprx_intr_handler(struct adapter *adapter) +{ + static struct intr_info ulprx_intr_info[] = { + { 0x7fffff, "ULPRX parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * ULP TX interrupt handler. + */ +static void ulptx_intr_handler(struct adapter *adapter) +{ + static struct intr_info ulptx_intr_info[] = { + { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1, + 0 }, + { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1, + 0 }, + { 0xfffffff, "ULPTX parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * PM TX interrupt handler. 
+ */ +static void pmtx_intr_handler(struct adapter *adapter) +{ + static struct intr_info pmtx_intr_info[] = { + { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, + { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, + { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, + { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 }, + { PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 }, + { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 }, + { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 }, + { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 }, + { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1}, + { 0 } + }; + + if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * PM RX interrupt handler. + */ +static void pmrx_intr_handler(struct adapter *adapter) +{ + static struct intr_info pmrx_intr_info[] = { + { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, + { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 }, + { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, + { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 }, + { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 }, + { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1}, + { 0 } + }; + + if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info)) + t4_fatal_err(adapter); +} + +/* + * CPL switch interrupt handler. + */ +static void cplsw_intr_handler(struct adapter *adapter) +{ + static struct intr_info cplsw_intr_info[] = { + { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, + { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, + { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, + { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 }, + { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 }, + { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info)) + t4_fatal_err(adapter); +} + +/* + * LE interrupt handler. + */ +static void le_intr_handler(struct adapter *adap) +{ + static struct intr_info le_intr_info[] = { + { LIPMISS, "LE LIP miss", -1, 0 }, + { LIP0, "LE 0 LIP error", -1, 0 }, + { PARITYERR, "LE parity error", -1, 1 }, + { UNKNOWNCMD, "LE unknown command", -1, 1 }, + { REQQPARERR, "LE request queue parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info)) + t4_fatal_err(adap); +} + +/* + * MPS interrupt handler. 
+ */ +static void mps_intr_handler(struct adapter *adapter) +{ + static struct intr_info mps_rx_intr_info[] = { + { 0xffffff, "MPS Rx parity error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_tx_intr_info[] = { + { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, + { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, + { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, + { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 }, + { BUBBLE, "MPS Tx underflow", -1, 1 }, + { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 }, + { FRMERR, "MPS Tx framing error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_trc_intr_info[] = { + { FILTMEM, "MPS TRC filter parity error", -1, 1 }, + { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, + { MISCPERR, "MPS TRC misc parity error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_stat_sram_intr_info[] = { + { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_stat_tx_intr_info[] = { + { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_stat_rx_intr_info[] = { + { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, + { 0 } + }; + static struct intr_info mps_cls_intr_info[] = { + { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, + { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, + { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, + { 0 } + }; + + int fat; + + fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE, + mps_rx_intr_info) + + t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE, + mps_tx_intr_info) + + t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE, + mps_trc_intr_info) + + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM, + mps_stat_sram_intr_info) + + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO, + mps_stat_tx_intr_info) + + t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO, + mps_stat_rx_intr_info) + + t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE, + mps_cls_intr_info); + + t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT | + RXINT | TXINT | STATINT); + t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */ + if (fat) + t4_fatal_err(adapter); +} + +#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE) + +/* + * EDC/MC interrupt handler. + */ +static void mem_intr_handler(struct adapter *adapter, int idx) +{ + static const char name[3][5] = { "EDC0", "EDC1", "MC" }; + + unsigned int addr, cnt_addr, v; + + if (idx <= MEM_EDC1) { + addr = EDC_REG(EDC_INT_CAUSE, idx); + cnt_addr = EDC_REG(EDC_ECC_STATUS, idx); + } else { + addr = MC_INT_CAUSE; + cnt_addr = MC_ECC_STATUS; + } + + v = t4_read_reg(adapter, addr) & MEM_INT_MASK; + if (v & PERR_INT_CAUSE) + dev_alert(adapter->pdev_dev, "%s FIFO parity error\n", + name[idx]); + if (v & ECC_CE_INT_CAUSE) { + u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr)); + + t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK); + if (printk_ratelimit()) + dev_warn(adapter->pdev_dev, + "%u %s correctable ECC data error%s\n", + cnt, name[idx], cnt > 1 ? "s" : ""); + } + if (v & ECC_UE_INT_CAUSE) + dev_alert(adapter->pdev_dev, + "%s uncorrectable ECC data error\n", name[idx]); + + t4_write_reg(adapter, addr, v); + if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE)) + t4_fatal_err(adapter); +} + +/* + * MA interrupt handler. 
+ */ +static void ma_intr_handler(struct adapter *adap) +{ + u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); + + if (status & MEM_PERR_INT_CAUSE) + dev_alert(adap->pdev_dev, + "MA parity error, parity status %#x\n", + t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); + if (status & MEM_WRAP_INT_CAUSE) { + v = t4_read_reg(adap, MA_INT_WRAP_STATUS); + dev_alert(adap->pdev_dev, "MA address wrap-around error by " + "client %u to address %#x\n", + MEM_WRAP_CLIENT_NUM_GET(v), + MEM_WRAP_ADDRESS_GET(v) << 4); + } + t4_write_reg(adap, MA_INT_CAUSE, status); + t4_fatal_err(adap); +} + +/* + * SMB interrupt handler. + */ +static void smb_intr_handler(struct adapter *adap) +{ + static struct intr_info smb_intr_info[] = { + { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, + { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, + { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info)) + t4_fatal_err(adap); +} + +/* + * NC-SI interrupt handler. + */ +static void ncsi_intr_handler(struct adapter *adap) +{ + static struct intr_info ncsi_intr_info[] = { + { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, + { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, + { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, + { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info)) + t4_fatal_err(adap); +} + +/* + * XGMAC interrupt handler. + */ +static void xgmac_intr_handler(struct adapter *adap, int port) +{ + u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); + + v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; + if (!v) + return; + + if (v & TXFIFO_PRTY_ERR) + dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n", + port); + if (v & RXFIFO_PRTY_ERR) + dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n", + port); + t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v); + t4_fatal_err(adap); +} + +/* + * PL interrupt handler. + */ +static void pl_intr_handler(struct adapter *adap) +{ + static struct intr_info pl_intr_info[] = { + { FATALPERR, "T4 fatal parity error", -1, 1 }, + { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, + { 0 } + }; + + if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info)) + t4_fatal_err(adap); +} + +#define PF_INTR_MASK (PFSW | PFCIM) +#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ + EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \ + CPL_SWITCH | SGE | ULP_TX) + +/** + * t4_slow_intr_handler - control path interrupt handler + * @adapter: the adapter + * + * T4 interrupt handler for non-data global interrupt events, e.g., errors. + * The designation 'slow' is because it involves register reads, while + * data interrupts typically don't involve any MMIOs. 
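+ *
+ * Returns 0 if none of the global cause bits in PL_INT_CAUSE are set;
+ * otherwise the relevant per-module handlers above are invoked, the
+ * cause bits owned by this function (GLBL_INTR_MASK) are cleared, and
+ * 1 is returned.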
+ */ +int t4_slow_intr_handler(struct adapter *adapter) +{ + u32 cause = t4_read_reg(adapter, PL_INT_CAUSE); + + if (!(cause & GLBL_INTR_MASK)) + return 0; + if (cause & CIM) + cim_intr_handler(adapter); + if (cause & MPS) + mps_intr_handler(adapter); + if (cause & NCSI) + ncsi_intr_handler(adapter); + if (cause & PL) + pl_intr_handler(adapter); + if (cause & SMB) + smb_intr_handler(adapter); + if (cause & XGMAC0) + xgmac_intr_handler(adapter, 0); + if (cause & XGMAC1) + xgmac_intr_handler(adapter, 1); + if (cause & XGMAC_KR0) + xgmac_intr_handler(adapter, 2); + if (cause & XGMAC_KR1) + xgmac_intr_handler(adapter, 3); + if (cause & PCIE) + pcie_intr_handler(adapter); + if (cause & MC) + mem_intr_handler(adapter, MEM_MC); + if (cause & EDC0) + mem_intr_handler(adapter, MEM_EDC0); + if (cause & EDC1) + mem_intr_handler(adapter, MEM_EDC1); + if (cause & LE) + le_intr_handler(adapter); + if (cause & TP) + tp_intr_handler(adapter); + if (cause & MA) + ma_intr_handler(adapter); + if (cause & PM_TX) + pmtx_intr_handler(adapter); + if (cause & PM_RX) + pmrx_intr_handler(adapter); + if (cause & ULP_RX) + ulprx_intr_handler(adapter); + if (cause & CPL_SWITCH) + cplsw_intr_handler(adapter); + if (cause & SGE) + sge_intr_handler(adapter); + if (cause & ULP_TX) + ulptx_intr_handler(adapter); + + /* Clear the interrupts just processed for which we are the master. */ + t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK); + (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ + return 1; +} + +/** + * t4_intr_enable - enable interrupts + * @adapter: the adapter whose interrupts should be enabled + * + * Enable PF-specific interrupts for the calling function and the top-level + * interrupt concentrator for global interrupts. Interrupts are already + * enabled at each module, here we just enable the roots of the interrupt + * hierarchies. + * + * Note: this function should be called only when the driver manages + * non PF-specific interrupts from the various HW modules. Only one PCI + * function at a time should be doing this. + */ +void t4_intr_enable(struct adapter *adapter) +{ + u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); + + t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE | + ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 | + ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 | + ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 | + ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | + ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | + ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | + EGRESS_SIZE_ERR); + t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); + t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); +} + +/** + * t4_intr_disable - disable interrupts + * @adapter: the adapter whose interrupts should be disabled + * + * Disable interrupts. We only disable the top-level interrupt + * concentrators. The caller must be a PCI function managing global + * interrupts. + */ +void t4_intr_disable(struct adapter *adapter) +{ + u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI)); + + t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0); + t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0); +} + +/** + * t4_intr_clear - clear all interrupts + * @adapter: the adapter whose interrupts should be cleared + * + * Clears all interrupts. The caller must be a PCI function managing + * global interrupts. 
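+ *
+ * Each per-module cause register is written with all-ones and the
+ * top-level PL_INT_CAUSE bits in GLBL_INTR_MASK are then cleared.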
+ */ +void t4_intr_clear(struct adapter *adapter) +{ + static const unsigned int cause_reg[] = { + SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3, + PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, + PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, + PCIE_NONFAT_ERR, PCIE_INT_CAUSE, + MC_INT_CAUSE, + MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE, + EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1), + CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE, + MYPF_REG(CIM_PF_HOST_INT_CAUSE), + TP_INT_CAUSE, + ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE, + PM_RX_INT_CAUSE, PM_TX_INT_CAUSE, + MPS_RX_PERR_INT_CAUSE, + CPL_INTR_CAUSE, + MYPF_REG(PL_PF_INT_CAUSE), + PL_PL_INT_CAUSE, + LE_DB_INT_CAUSE, + }; + + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(cause_reg); ++i) + t4_write_reg(adapter, cause_reg[i], 0xffffffff); + + t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK); + (void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */ +} + +/** + * hash_mac_addr - return the hash value of a MAC address + * @addr: the 48-bit Ethernet MAC address + * + * Hashes a MAC address according to the hash function used by HW inexact + * (hash) address matching. + */ +static int hash_mac_addr(const u8 *addr) +{ + u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; + u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; + a ^= b; + a ^= (a >> 12); + a ^= (a >> 6); + return a & 0x3f; +} + +/** + * t4_config_rss_range - configure a portion of the RSS mapping table + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @viid: virtual interface whose RSS subtable is to be written + * @start: start entry in the table to write + * @n: how many table entries to write + * @rspq: values for the response queue lookup table + * @nrspq: number of values in @rspq + * + * Programs the selected part of the VI's RSS mapping table with the + * provided values. If @nrspq < @n the supplied values are used repeatedly + * until the full table range is populated. + * + * The caller must ensure the values in @rspq are in the range allowed for + * @viid. + */ +int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, + int start, int n, const u16 *rspq, unsigned int nrspq) +{ + int ret; + const u16 *rsp = rspq; + const u16 *rsp_end = rspq + nrspq; + struct fw_rss_ind_tbl_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE | + FW_RSS_IND_TBL_CMD_VIID(viid)); + cmd.retval_len16 = htonl(FW_LEN16(cmd)); + + /* each fw_rss_ind_tbl_cmd takes up to 32 entries */ + while (n > 0) { + int nq = min(n, 32); + __be32 *qp = &cmd.iq0_to_iq2; + + cmd.niqid = htons(nq); + cmd.startidx = htons(start); + + start += nq; + n -= nq; + + while (nq > 0) { + unsigned int v; + + v = FW_RSS_IND_TBL_CMD_IQ0(*rsp); + if (++rsp >= rsp_end) + rsp = rspq; + v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp); + if (++rsp >= rsp_end) + rsp = rspq; + v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp); + if (++rsp >= rsp_end) + rsp = rspq; + + *qp++ = htonl(v); + nq -= 3; + } + + ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); + if (ret) + return ret; + } + return 0; +} + +/** + * t4_config_glbl_rss - configure the global RSS mode + * @adapter: the adapter + * @mbox: mbox to use for the FW command + * @mode: global RSS mode + * @flags: mode-specific flags + * + * Sets the global RSS mode. 
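+ *
+ * For example, basic-virtual mode with tunnel-map lookups enabled could
+ * be requested with (sketch only):
+ *
+ *     t4_config_glbl_rss(adap, mbox, FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+ *                        FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+ *                        FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);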
+ */ +int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, + unsigned int flags) +{ + struct fw_rss_glb_config_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE); + c.retval_len16 = htonl(FW_LEN16(c)); + if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) { + c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); + } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) { + c.u.basicvirtual.mode_pkd = + htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode)); + c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags); + } else + return -EINVAL; + return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); +} + +/* Read an RSS table row */ +static int rd_rss_row(struct adapter *adap, int row, u32 *val) +{ + t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row); + return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1, + 5, 0, val); +} + +/** + * t4_read_rss - read the contents of the RSS mapping table + * @adapter: the adapter + * @map: holds the contents of the RSS mapping table + * + * Reads the contents of the RSS hash->queue mapping table. + */ +int t4_read_rss(struct adapter *adapter, u16 *map) +{ + u32 val; + int i, ret; + + for (i = 0; i < RSS_NENTRIES / 2; ++i) { + ret = rd_rss_row(adapter, i, &val); + if (ret) + return ret; + *map++ = LKPTBLQUEUE0_GET(val); + *map++ = LKPTBLQUEUE1_GET(val); + } + return 0; +} + +/** + * t4_tp_get_tcp_stats - read TP's TCP MIB counters + * @adap: the adapter + * @v4: holds the TCP/IP counter values + * @v6: holds the TCP/IPv6 counter values + * + * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. + * Either @v4 or @v6 may be %NULL to skip the corresponding stats. + */ +void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6) +{ + u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1]; + +#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST) +#define STAT(x) val[STAT_IDX(x)] +#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) + + if (v4) { + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, + ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST); + v4->tcpOutRsts = STAT(OUT_RST); + v4->tcpInSegs = STAT64(IN_SEG); + v4->tcpOutSegs = STAT64(OUT_SEG); + v4->tcpRetransSegs = STAT64(RXT_SEG); + } + if (v6) { + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val, + ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST); + v6->tcpOutRsts = STAT(OUT_RST); + v6->tcpInSegs = STAT64(IN_SEG); + v6->tcpOutSegs = STAT64(OUT_SEG); + v6->tcpRetransSegs = STAT64(RXT_SEG); + } +#undef STAT64 +#undef STAT +#undef STAT_IDX +} + +/** + * t4_tp_get_err_stats - read TP's error MIB counters + * @adap: the adapter + * @st: holds the counter values + * + * Returns the values of TP's error counters. 
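+ *
+ * The counters are read from the TP MIB through the indirect
+ * TP_MIB_INDEX/TP_MIB_DATA register pair, one block per counter group.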
+ */ +void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) +{ + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs, + 12, TP_MIB_MAC_IN_ERR_0); + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops, + 8, TP_MIB_TNL_CNG_DROP_0); + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops, + 4, TP_MIB_TNL_DROP_0); + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops, + 4, TP_MIB_OFD_VLN_DROP_0); + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs, + 4, TP_MIB_TCP_V6IN_ERR_0); + t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh, + 2, TP_MIB_OFD_ARP_DROP); +} + +/** + * t4_read_mtu_tbl - returns the values in the HW path MTU table + * @adap: the adapter + * @mtus: where to store the MTU values + * @mtu_log: where to store the MTU base-2 log (may be %NULL) + * + * Reads the HW path MTU table. + */ +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) +{ + u32 v; + int i; + + for (i = 0; i < NMTUS; ++i) { + t4_write_reg(adap, TP_MTU_TABLE, + MTUINDEX(0xff) | MTUVALUE(i)); + v = t4_read_reg(adap, TP_MTU_TABLE); + mtus[i] = MTUVALUE_GET(v); + if (mtu_log) + mtu_log[i] = MTUWIDTH_GET(v); + } +} + +/** + * init_cong_ctrl - initialize congestion control parameters + * @a: the alpha values for congestion control + * @b: the beta values for congestion control + * + * Initialize the congestion control parameters. + */ +static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) +{ + a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; + a[9] = 2; + a[10] = 3; + a[11] = 4; + a[12] = 5; + a[13] = 6; + a[14] = 7; + a[15] = 8; + a[16] = 9; + a[17] = 10; + a[18] = 14; + a[19] = 17; + a[20] = 21; + a[21] = 25; + a[22] = 30; + a[23] = 35; + a[24] = 45; + a[25] = 60; + a[26] = 80; + a[27] = 100; + a[28] = 200; + a[29] = 300; + a[30] = 400; + a[31] = 500; + + b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0; + b[9] = b[10] = 1; + b[11] = b[12] = 2; + b[13] = b[14] = b[15] = b[16] = 3; + b[17] = b[18] = b[19] = b[20] = b[21] = 4; + b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5; + b[28] = b[29] = 6; + b[30] = b[31] = 7; +} + +/* The minimum additive increment value for the congestion control table */ +#define CC_MIN_INCR 2U + +/** + * t4_load_mtus - write the MTU and congestion control HW tables + * @adap: the adapter + * @mtus: the values for the MTU table + * @alpha: the values for the congestion control alpha parameter + * @beta: the values for the congestion control beta parameter + * + * Write the HW MTU table with the supplied MTUs and the high-speed + * congestion control table with the supplied alpha, beta, and MTUs. + * We write the two tables together because the additive increments + * depend on the MTUs. 
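+ *
+ * For example, with the default alpha values from init_cong_ctrl() and a
+ * 1500-byte MTU, congestion window 0 (alpha 1, avg_pkts 2) gets an
+ * increment of max((1500 - 40) * 1 / 2, CC_MIN_INCR) = 730, while window
+ * 31 (alpha 500, avg_pkts 229376) gets
+ * max((1500 - 40) * 500 / 229376, CC_MIN_INCR) = 3.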
+ */ +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta) +{ + static const unsigned int avg_pkts[NCCTRL_WIN] = { + 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, + 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, + 28672, 40960, 57344, 81920, 114688, 163840, 229376 + }; + + unsigned int i, w; + + for (i = 0; i < NMTUS; ++i) { + unsigned int mtu = mtus[i]; + unsigned int log2 = fls(mtu); + + if (!(mtu & ((1 << log2) >> 2))) /* round */ + log2--; + t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) | + MTUWIDTH(log2) | MTUVALUE(mtu)); + + for (w = 0; w < NCCTRL_WIN; ++w) { + unsigned int inc; + + inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], + CC_MIN_INCR); + + t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) | + (w << 16) | (beta[w] << 13) | inc); + } + } +} + +/** + * t4_set_trace_filter - configure one of the tracing filters + * @adap: the adapter + * @tp: the desired trace filter parameters + * @idx: which filter to configure + * @enable: whether to enable or disable the filter + * + * Configures one of the tracing filters available in HW. If @enable is + * %0 @tp is not examined and may be %NULL. + */ +int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, + int idx, int enable) +{ + int i, ofst = idx * 4; + u32 data_reg, mask_reg, cfg; + u32 multitrc = TRCMULTIFILTER; + + if (!enable) { + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0); + goto out; + } + + if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f || + tp->skip_ofst > 0x1f || tp->min_len > 0x1ff || + tp->snap_len > 9600 || (idx && tp->snap_len > 256)) + return -EINVAL; + + if (tp->snap_len > 256) { /* must be tracer 0 */ + if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) | + t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) | + t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN) + return -EINVAL; /* other tracers are enabled */ + multitrc = 0; + } else if (idx) { + i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B); + if (TFCAPTUREMAX_GET(i) > 256 && + (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN)) + return -EINVAL; + } + + /* stop the tracer we'll be changing */ + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0); + + /* disable tracing globally if running in the wrong single/multi mode */ + cfg = t4_read_reg(adap, MPS_TRC_CFG); + if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) { + t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN); + t4_read_reg(adap, MPS_TRC_CFG); /* flush */ + msleep(1); + if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY)) + return -ETIMEDOUT; + } + /* + * At this point either the tracing is enabled and in the right mode or + * disabled. + */ + + idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH); + data_reg = MPS_TRC_FILTER0_MATCH + idx; + mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx; + + for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { + t4_write_reg(adap, data_reg, tp->data[i]); + t4_write_reg(adap, mask_reg, ~tp->mask[i]); + } + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst, + TFCAPTUREMAX(tp->snap_len) | + TFMINPKTSIZE(tp->min_len)); + t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, + TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) | + TFPORT(tp->port) | TFEN | + (tp->invert ? 
TFINVERTMATCH : 0)); + + cfg &= ~TRCMULTIFILTER; + t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc); +out: t4_read_reg(adap, MPS_TRC_CFG); /* flush */ + return 0; +} + +/** + * t4_get_trace_filter - query one of the tracing filters + * @adap: the adapter + * @tp: the current trace filter parameters + * @idx: which trace filter to query + * @enabled: non-zero if the filter is enabled + * + * Returns the current settings of one of the HW tracing filters. + */ +void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx, + int *enabled) +{ + u32 ctla, ctlb; + int i, ofst = idx * 4; + u32 data_reg, mask_reg; + + ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst); + ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst); + + *enabled = !!(ctla & TFEN); + tp->snap_len = TFCAPTUREMAX_GET(ctlb); + tp->min_len = TFMINPKTSIZE_GET(ctlb); + tp->skip_ofst = TFOFFSET_GET(ctla); + tp->skip_len = TFLENGTH_GET(ctla); + tp->invert = !!(ctla & TFINVERTMATCH); + tp->port = TFPORT_GET(ctla); + + ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx; + data_reg = MPS_TRC_FILTER0_MATCH + ofst; + mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst; + + for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) { + tp->mask[i] = ~t4_read_reg(adap, mask_reg); + tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i]; + } +} + +/** + * get_mps_bg_map - return the buffer groups associated with a port + * @adap: the adapter + * @idx: the port index + * + * Returns a bitmap indicating which MPS buffer groups are associated + * with the given port. Bit i is set if buffer group i is used by the + * port. + */ +static unsigned int get_mps_bg_map(struct adapter *adap, int idx) +{ + u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL)); + + if (n == 0) + return idx == 0 ? 0xf : 0; + if (n == 1) + return idx < 2 ? (3 << (2 * idx)) : 0; + return 1 << idx; +} + +/** + * t4_get_port_stats - collect port statistics + * @adap: the adapter + * @idx: the port index + * @p: the stats structure to fill + * + * Collect statistics related to the given port from HW. 
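+ *
+ * The per-buffer-group drop and truncate counters are read only for the
+ * groups that get_mps_bg_map() assigns to the port; e.g. when
+ * NUMPORTS_GET() returns 1, port 0 reads groups 0-1 (bgmap 0x3) and
+ * port 1 reads groups 2-3 (bgmap 0xc).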
+ */ +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) +{ + u32 bgmap = get_mps_bg_map(adap, idx); + +#define GET_STAT(name) \ + t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L)) +#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) + + p->tx_octets = GET_STAT(TX_PORT_BYTES); + p->tx_frames = GET_STAT(TX_PORT_FRAMES); + p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); + p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); + p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); + p->tx_error_frames = GET_STAT(TX_PORT_ERROR); + p->tx_frames_64 = GET_STAT(TX_PORT_64B); + p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); + p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); + p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); + p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); + p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); + p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); + p->tx_drop = GET_STAT(TX_PORT_DROP); + p->tx_pause = GET_STAT(TX_PORT_PAUSE); + p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); + p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); + p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); + p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); + p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); + p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); + p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); + p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); + + p->rx_octets = GET_STAT(RX_PORT_BYTES); + p->rx_frames = GET_STAT(RX_PORT_FRAMES); + p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); + p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); + p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); + p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); + p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); + p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); + p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); + p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); + p->rx_runt = GET_STAT(RX_PORT_LESS_64B); + p->rx_frames_64 = GET_STAT(RX_PORT_64B); + p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); + p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); + p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); + p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); + p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); + p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); + p->rx_pause = GET_STAT(RX_PORT_PAUSE); + p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); + p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); + p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); + p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); + p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); + p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); + p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); + p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); + + p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; + p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; + p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; + p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; + p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; + p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; + p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; + p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; + +#undef GET_STAT +#undef GET_STAT_COM +} + +/** + * t4_get_lb_stats - collect loopback port statistics + * @adap: the adapter + * @idx: the loopback port index + * @p: the stats structure to fill + * + * Return HW statistics for the given loopback port. 
+ */ +void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p) +{ + u32 bgmap = get_mps_bg_map(adap, idx); + +#define GET_STAT(name) \ + t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)) +#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) + + p->octets = GET_STAT(BYTES); + p->frames = GET_STAT(FRAMES); + p->bcast_frames = GET_STAT(BCAST); + p->mcast_frames = GET_STAT(MCAST); + p->ucast_frames = GET_STAT(UCAST); + p->error_frames = GET_STAT(ERROR); + + p->frames_64 = GET_STAT(64B); + p->frames_65_127 = GET_STAT(65B_127B); + p->frames_128_255 = GET_STAT(128B_255B); + p->frames_256_511 = GET_STAT(256B_511B); + p->frames_512_1023 = GET_STAT(512B_1023B); + p->frames_1024_1518 = GET_STAT(1024B_1518B); + p->frames_1519_max = GET_STAT(1519B_MAX); + p->drop = t4_read_reg(adap, PORT_REG(idx, + MPS_PORT_STAT_LB_PORT_DROP_FRAMES)); + + p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0; + p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0; + p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0; + p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0; + p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0; + p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0; + p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0; + p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0; + +#undef GET_STAT +#undef GET_STAT_COM +} + +/** + * t4_wol_magic_enable - enable/disable magic packet WoL + * @adap: the adapter + * @port: the physical port index + * @addr: MAC address expected in magic packets, %NULL to disable + * + * Enables/disables magic packet wake-on-LAN for the selected port. + */ +void t4_wol_magic_enable(struct adapter *adap, unsigned int port, + const u8 *addr) +{ + if (addr) { + t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO), + (addr[2] << 24) | (addr[3] << 16) | + (addr[4] << 8) | addr[5]); + t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI), + (addr[0] << 8) | addr[1]); + } + t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN, + addr ? MAGICEN : 0); +} + +/** + * t4_wol_pat_enable - enable/disable pattern-based WoL + * @adap: the adapter + * @port: the physical port index + * @map: bitmap of which HW pattern filters to set + * @mask0: byte mask for bytes 0-63 of a packet + * @mask1: byte mask for bytes 64-127 of a packet + * @crc: Ethernet CRC for selected bytes + * @enable: enable/disable switch + * + * Sets the pattern filters indicated in @map to mask out the bytes + * specified in @mask0/@mask1 in received packets and compare the CRC of + * the resulting packet against @crc. If @enable is %true pattern-based + * WoL is enabled, otherwise disabled. 
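+ *
+ * Bit i of @map selects HW pattern filter i, so as a rough sketch (mask
+ * and CRC values are caller-specific) programming filters 0 and 1 with
+ * the same masks could look like:
+ *
+ *     err = t4_wol_pat_enable(adap, port, 0x3, mask0, mask1, crc, true);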
+ */
+int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
+ u64 mask0, u64 mask1, unsigned int crc, bool enable)
+{
+ int i;
+
+ if (!enable) {
+ t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
+ PATEN, 0);
+ return 0;
+ }
+ if (map > 0xff)
+ return -EINVAL;
+
+#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
+
+ t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
+ t4_write_reg(adap, EPIO_REG(DATA2), mask1);
+ t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
+
+ for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
+ if (!(map & 1))
+ continue;
+
+ /* write byte masks */
+ t4_write_reg(adap, EPIO_REG(DATA0), mask0);
+ t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
+ t4_read_reg(adap, EPIO_REG(OP)); /* flush */
+ if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+ return -ETIMEDOUT;
+
+ /* write CRC */
+ t4_write_reg(adap, EPIO_REG(DATA0), crc);
+ t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
+ t4_read_reg(adap, EPIO_REG(OP)); /* flush */
+ if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+ return -ETIMEDOUT;
+ }
+#undef EPIO_REG
+
+ t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
+ return 0;
+}
+
+#define INIT_CMD(var, cmd, rd_wr) do { \
+ (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
+ FW_CMD_REQUEST | FW_CMD_##rd_wr); \
+ (var).retval_len16 = htonl(FW_LEN16(var)); \
+} while (0)
+
+/**
+ * t4_mdio_rd - read a PHY register through MDIO
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @phy_addr: the PHY address
+ * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
+ * @reg: the register to read
+ * @valp: where to store the value
+ *
+ * Issues a FW command through the given mailbox to read a PHY register.
+ */
+int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+ unsigned int mmd, unsigned int reg, u16 *valp)
+{
+ int ret;
+ struct fw_ldst_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
+ FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+ c.cycles_to_len16 = htonl(FW_LEN16(c));
+ c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
+ FW_LDST_CMD_MMD(mmd));
+ c.u.mdio.raddr = htons(reg);
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0)
+ *valp = ntohs(c.u.mdio.rval);
+ return ret;
+}
+
+/**
+ * t4_mdio_wr - write a PHY register through MDIO
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @phy_addr: the PHY address
+ * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
+ * @reg: the register to write
+ * @val: value to write
+ *
+ * Issues a FW command through the given mailbox to write a PHY register.
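+ *
+ * A rough usage sketch (mailbox, PHY address, register, and value are
+ * caller-specific): a clause-45 write to register @reg of MMD 1 (PMA/PMD)
+ * would be
+ *
+ *     err = t4_mdio_wr(adap, mbox, phy_addr, 1, reg, val);
+ *
+ * with @mmd set to 0 instead for a clause-22 PHY.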
+ */
+int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+ unsigned int mmd, unsigned int reg, u16 val)
+{
+ struct fw_ldst_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+ c.cycles_to_len16 = htonl(FW_LEN16(c));
+ c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
+ FW_LDST_CMD_MMD(mmd));
+ c.u.mdio.raddr = htons(reg);
+ c.u.mdio.rval = htons(val);
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_hello - establish communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @evt_mbox: mailbox to receive async FW events
+ * @master: specifies the caller's willingness to be the device master
+ * @state: returns the current device state
+ *
+ * Issues a command to establish communication with FW.
+ */
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+ enum dev_master master, enum dev_state *state)
+{
+ int ret;
+ struct fw_hello_cmd c;
+
+ INIT_CMD(c, HELLO, WRITE);
+ c.err_to_mbasyncnot = htonl(
+ FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
+ FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
+ FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
+ FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0 && state) {
+ u32 v = ntohl(c.err_to_mbasyncnot);
+ if (v & FW_HELLO_CMD_INIT)
+ *state = DEV_STATE_INIT;
+ else if (v & FW_HELLO_CMD_ERR)
+ *state = DEV_STATE_ERR;
+ else
+ *state = DEV_STATE_UNINIT;
+ }
+ return ret;
+}
+
+/**
+ * t4_fw_bye - end communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to terminate communication with FW.
+ */
+int t4_fw_bye(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_bye_cmd c;
+
+ INIT_CMD(c, BYE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_early_init - ask FW to initialize the device
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to FW to partially initialize the device. This
+ * performs initialization that generally doesn't depend on user input.
+ */
+int t4_early_init(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_initialize_cmd c;
+
+ INIT_CMD(c, INITIALIZE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_reset - issue a reset to FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @reset: specifies the type of reset to perform
+ *
+ * Issues a reset command of the specified type to FW.
+ */
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
+{
+ struct fw_reset_cmd c;
+
+ INIT_CMD(c, RESET, WRITE);
+ c.val = htonl(reset);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_query_params - query FW or device parameters
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF
+ * @vf: the VF
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @val: the parameter values
+ *
+ * Reads the value of FW or device parameters. Up to 7 parameters can be
+ * queried at once.
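+ *
+ * A minimal sketch (the encoded parameter mnemonic comes from the
+ * FW_PARAMS_* definitions in t4fw_api.h and is built by the caller):
+ *
+ *     u32 param = ...;                     /* encoded parameter name */
+ *     u32 val;
+ *
+ *     err = t4_query_params(adap, mbox, pf, 0, 1, &param, &val);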
+ */ +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val) +{ + int i, ret; + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | + FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) | + FW_PARAMS_CMD_VFN(vf)); + c.retval_len16 = htonl(FW_LEN16(c)); + for (i = 0; i < nparams; i++, p += 2) + *p = htonl(*params++); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) + for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) + *val++ = ntohl(*p); + return ret; +} + +/** + * t4_set_params - sets FW or device parameters + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF + * @vf: the VF + * @nparams: the number of parameters + * @params: the parameter names + * @val: the parameter values + * + * Sets the value of FW or device parameters. Up to 7 parameters can be + * specified at once. + */ +int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val) +{ + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | + FW_PARAMS_CMD_VFN(vf)); + c.retval_len16 = htonl(FW_LEN16(c)); + while (nparams--) { + *p++ = htonl(*params++); + *p++ = htonl(*val++); + } + + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_cfg_pfvf - configure PF/VF resource limits + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF being configured + * @vf: the VF being configured + * @txq: the max number of egress queues + * @txq_eth_ctrl: the max number of egress Ethernet or control queues + * @rxqi: the max number of interrupt-capable ingress queues + * @rxq: the max number of interruptless ingress queues + * @tc: the PCI traffic class + * @vi: the max number of virtual interfaces + * @cmask: the channel access rights mask for the PF/VF + * @pmask: the port access rights mask for the PF/VF + * @nexact: the maximum number of exact MPS filters + * @rcaps: read capabilities + * @wxcaps: write/execute capabilities + * + * Configures resource limits and capabilities for a physical or virtual + * function. 
+ */
+int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
+ unsigned int rxqi, unsigned int rxq, unsigned int tc,
+ unsigned int vi, unsigned int cmask, unsigned int pmask,
+ unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
+{
+ struct fw_pfvf_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
+ FW_PFVF_CMD_VFN(vf));
+ c.retval_len16 = htonl(FW_LEN16(c));
+ c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
+ FW_PFVF_CMD_NIQ(rxq));
+ c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
+ FW_PFVF_CMD_PMASK(pmask) |
+ FW_PFVF_CMD_NEQ(txq));
+ c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
+ FW_PFVF_CMD_NEXACTF(nexact));
+ c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
+ FW_PFVF_CMD_WX_CAPS(wxcaps) |
+ FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_alloc_vi - allocate a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @nmac: number of MAC addresses needed (1 to 5)
+ * @mac: the MAC addresses of the VI
+ * @rss_size: size of RSS table slice associated with this VI
+ *
+ * Allocates a virtual interface for the given physical port. If @mac is
+ * not %NULL it contains the MAC addresses of the VI as assigned by FW.
+ * @mac should be large enough to hold @nmac Ethernet addresses, they are
+ * stored consecutively so the space needed is @nmac * 6 bytes.
+ * Returns a negative error number or the non-negative VI id.
+ */
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+ unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+ unsigned int *rss_size)
+{
+ int ret;
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
+ FW_CMD_WRITE | FW_CMD_EXEC |
+ FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
+ c.portid_pkd = FW_VI_CMD_PORTID(port);
+ c.nmac = nmac - 1;
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ if (mac) {
+ memcpy(mac, c.mac, sizeof(c.mac));
+ switch (nmac) {
+ case 5:
+ memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
+ case 4:
+ memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
+ case 3:
+ memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
+ case 2:
+ memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
+ }
+ }
+ if (rss_size)
+ *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
+ return ntohs(c.viid_pkd);
+}
+
+/**
+ * t4_free_vi - free a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @viid: virtual interface identifier
+ *
+ * Free a previously allocated virtual interface.
+ */ +int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int viid) +{ + struct fw_vi_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_VI_CMD_PFN(pf) | + FW_VI_CMD_VFN(vf)); + c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c)); + c.viid_pkd = htons(FW_VI_CMD_VIID(viid)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); +} + +/** + * t4_set_rxmode - set Rx properties of a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @mtu: the new MTU or -1 + * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change + * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change + * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Sets Rx properties of a virtual interface. + */ +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, bool sleep_ok) +{ + struct fw_vi_rxmode_cmd c; + + /* convert to FW values */ + if (mtu < 0) + mtu = FW_RXMODE_MTU_NO_CHG; + if (promisc < 0) + promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; + if (all_multi < 0) + all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; + if (bcast < 0) + bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid)); + c.retval_len16 = htonl(FW_LEN16(c)); + c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) | + FW_VI_RXMODE_CMD_PROMISCEN(promisc) | + FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | + FW_VI_RXMODE_CMD_BROADCASTEN(bcast)); + return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); +} + +/** + * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @free: if true any existing filters for this VI id are first removed + * @naddr: the number of MAC addresses to allocate filters for (up to 7) + * @addr: the MAC address(es) + * @idx: where to store the index of each allocated filter + * @hash: pointer to hash address filter bitmap + * @sleep_ok: call is allowed to sleep + * + * Allocates an exact-match filter for each of the supplied addresses and + * sets it to the corresponding address. If @idx is not %NULL it should + * have at least @naddr entries, each of which will be set to the index of + * the filter allocated for the corresponding MAC address. If a filter + * could not be allocated for an address its index is set to 0xffff. + * If @hash is not %NULL addresses that fail to allocate an exact filter + * are hashed and update the hash filter bitmap pointed at by @hash. + * + * Returns a negative error number or the number of filters allocated. + */ +int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool free, unsigned int naddr, + const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) +{ + int i, ret; + struct fw_vi_mac_cmd c; + struct fw_vi_mac_exact *p; + + if (naddr > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | (free ? 
FW_CMD_EXEC : 0) | + FW_VI_MAC_CMD_VIID(viid)); + c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) | + FW_CMD_LEN16((naddr + 2) / 2)); + + for (i = 0, p = c.u.exact; i < naddr; i++, p++) { + p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | + FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); + memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); + } + + ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); + if (ret) + return ret; + + for (i = 0, p = c.u.exact; i < naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); + + if (idx) + idx[i] = index >= NEXACT_MAC ? 0xffff : index; + if (index < NEXACT_MAC) + ret++; + else if (hash) + *hash |= (1 << hash_mac_addr(addr[i])); + } + return ret; +} + +/** + * t4_change_mac - modifies the exact-match filter for a MAC address + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @idx: index of existing filter for old value of MAC address, or -1 + * @addr: the new MAC address value + * @persist: whether a new MAC allocation should be persistent + * @add_smt: if true also add the address to the HW SMT + * + * Modifies an exact-match filter and sets it to the new MAC address. + * Note that in general it is not possible to modify the value of a given + * filter so the generic way to modify an address filter is to free the one + * being used by the old address value and allocate a new filter for the + * new address value. @idx can be -1 if the address is a new addition. + * + * Returns a negative error number or the index of the filter with the new + * MAC value. + */ +int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, + int idx, const u8 *addr, bool persist, bool add_smt) +{ + int ret, mode; + struct fw_vi_mac_cmd c; + struct fw_vi_mac_exact *p = c.u.exact; + + if (idx < 0) /* new allocation */ + idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; + mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid)); + c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1)); + p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | + FW_VI_MAC_CMD_SMAC_RESULT(mode) | + FW_VI_MAC_CMD_IDX(idx)); + memcpy(p->macaddr, addr, sizeof(p->macaddr)); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) { + ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); + if (ret >= NEXACT_MAC) + ret = -ENOMEM; + } + return ret; +} + +/** + * t4_set_addr_hash - program the MAC inexact-match hash filter + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @ucast: whether the hash filter should also match unicast addresses + * @vec: the value to be written to the hash filter + * @sleep_ok: call is allowed to sleep + * + * Sets the 64-bit inexact-match hash filter for a virtual interface. 
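+ *
+ * Each address maps to one of the 64 vector bits via hash_mac_addr(); as
+ * a sketch, accepting one extra multicast address could be done with
+ * vec = 1ULL << hash_mac_addr(addr) and @ucast false, which mirrors how
+ * t4_alloc_mac_filt() folds overflow addresses into the hash filter.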
+ */ +int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool ucast, u64 vec, bool sleep_ok) +{ + struct fw_vi_mac_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid)); + c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN | + FW_VI_MAC_CMD_HASHUNIEN(ucast) | + FW_CMD_LEN16(1)); + c.u.hash.hashvec = cpu_to_be64(vec); + return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); +} + +/** + * t4_enable_vi - enable/disable a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * + * Enables/disables a virtual interface. + */ +int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool rx_en, bool tx_en) +{ + struct fw_vi_enable_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); + c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) | + FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_identify_port - identify a VI's port by blinking its LED + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @nblinks: how many times to blink LED at 2.5 Hz + * + * Identifies a VI's port by blinking its LED. + */ +int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, + unsigned int nblinks) +{ + struct fw_vi_enable_cmd c; + + c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); + c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c)); + c.blinkdur = htons(nblinks); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_iq_start_stop - enable/disable an ingress queue and its FLs + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @start: %true to enable the queues, %false to disable them + * @pf: the PF owning the queues + * @vf: the VF owning the queues + * @iqid: ingress queue id + * @fl0id: FL0 queue id or 0xffff if no attached FL0 + * @fl1id: FL1 queue id or 0xffff if no attached FL1 + * + * Starts or stops an ingress queue and its associated FLs, if any. + */ +int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, + unsigned int pf, unsigned int vf, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id) +{ + struct fw_iq_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | + FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) | + FW_IQ_CMD_VFN(vf)); + c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) | + FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c)); + c.iqid = htons(iqid); + c.fl0id = htons(fl0id); + c.fl1id = htons(fl1id); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_iq_free - free an ingress queue and its FLs + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queues + * @vf: the VF owning the queues + * @iqtype: the ingress queue type + * @iqid: ingress queue id + * @fl0id: FL0 queue id or 0xffff if no attached FL0 + * @fl1id: FL1 queue id or 0xffff if no attached FL1 + * + * Frees an ingress queue and its associated FLs, if any. 
+ */
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
+ FW_IQ_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
+ c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
+ c.iqid = htons(iqid);
+ c.fl0id = htons(fl0id);
+ c.fl1id = htons(fl1id);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_eth_eq_free - free an Ethernet egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an Ethernet egress queue.
+ */
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_eth_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
+ FW_EQ_ETH_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
+ c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_ctrl_eq_free - free a control egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees a control egress queue.
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_ctrl_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
+ FW_EQ_CTRL_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+ c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_ofld_eq_free - free an offload egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an offload egress queue.
+ */
+int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_ofld_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
+ FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
+ FW_EQ_OFLD_CMD_VFN(vf));
+ c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
+ c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_handle_fw_rpl - process a FW reply message
+ * @adap: the adapter
+ * @rpl: start of the FW message
+ *
+ * Processes a FW message, such as link state change messages.
+ */ +int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) +{ + u8 opcode = *(const u8 *)rpl; + + if (opcode == FW_PORT_CMD) { /* link/module state change message */ + int speed = 0, fc = 0; + const struct fw_port_cmd *p = (void *)rpl; + int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid)); + int port = adap->chan_map[chan]; + struct port_info *pi = adap2pinfo(adap, port); + struct link_config *lc = &pi->link_cfg; + u32 stat = ntohl(p->u.info.lstatus_to_modtype); + int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0; + u32 mod = FW_PORT_CMD_MODTYPE_GET(stat); + + if (stat & FW_PORT_CMD_RXPAUSE) + fc |= PAUSE_RX; + if (stat & FW_PORT_CMD_TXPAUSE) + fc |= PAUSE_TX; + if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) + speed = SPEED_100; + else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) + speed = SPEED_1000; + else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) + speed = SPEED_10000; + + if (link_ok != lc->link_ok || speed != lc->speed || + fc != lc->fc) { /* something changed */ + lc->link_ok = link_ok; + lc->speed = speed; + lc->fc = fc; + t4_os_link_changed(adap, port, link_ok); + } + if (mod != pi->mod_type) { + pi->mod_type = mod; + t4_os_portmod_changed(adap, port); + } + } + return 0; +} + +static void __devinit get_pci_mode(struct adapter *adapter, + struct pci_params *p) +{ + u16 val; + u32 pcie_cap = pci_pcie_cap(adapter->pdev); + + if (pcie_cap) { + pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA, + &val); + p->speed = val & PCI_EXP_LNKSTA_CLS; + p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4; + } +} + +/** + * init_link_config - initialize a link's SW state + * @lc: structure holding the link state + * @caps: link capabilities + * + * Initializes the SW state maintained for each link, including the link's + * capabilities and default speed/flow-control/autonegotiation settings. + */ +static void __devinit init_link_config(struct link_config *lc, + unsigned int caps) +{ + lc->supported = caps; + lc->requested_speed = 0; + lc->speed = 0; + lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; + if (lc->supported & FW_PORT_CAP_ANEG) { + lc->advertising = lc->supported & ADVERT_MASK; + lc->autoneg = AUTONEG_ENABLE; + lc->requested_fc |= PAUSE_AUTONEG; + } else { + lc->advertising = 0; + lc->autoneg = AUTONEG_DISABLE; + } +} + +static int __devinit wait_dev_ready(struct adapter *adap) +{ + if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff) + return 0; + msleep(500); + return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO; +} + +/** + * t4_prep_adapter - prepare SW and HW for operation + * @adapter: the adapter + * @reset: if true perform a HW reset + * + * Initialize adapter SW state for the various HW modules, set initial + * values for some adapter tunables, take PHYs out of reset, and + * initialize the MDIO interface. + */ +int __devinit t4_prep_adapter(struct adapter *adapter) +{ + int ret; + + ret = wait_dev_ready(adapter); + if (ret < 0) + return ret; + + get_pci_mode(adapter, &adapter->params.pci); + adapter->params.rev = t4_read_reg(adapter, PL_REV); + + ret = get_vpd_params(adapter, &adapter->params.vpd); + if (ret < 0) + return ret; + + init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); + + /* + * Default port for debugging in case we can't reach FW. 
+ */ + adapter->params.nports = 1; + adapter->params.portvec = 1; + return 0; +} + +int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf) +{ + u8 addr[6]; + int ret, i, j = 0; + struct fw_port_cmd c; + + memset(&c, 0, sizeof(c)); + + for_each_port(adap, i) { + unsigned int rss_size; + struct port_info *p = adap2pinfo(adap, i); + + while ((adap->params.portvec & (1 << j)) == 0) + j++; + + c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | + FW_CMD_REQUEST | FW_CMD_READ | + FW_PORT_CMD_PORTID(j)); + c.action_to_len16 = htonl( + FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) | + FW_LEN16(c)); + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret) + return ret; + + ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); + if (ret < 0) + return ret; + + p->viid = ret; + p->tx_chan = j; + p->lport = j; + p->rss_size = rss_size; + memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); + memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN); + + ret = ntohl(c.u.info.lstatus_to_modtype); + p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? + FW_PORT_CMD_MDIOADDR_GET(ret) : -1; + p->port_type = FW_PORT_CMD_PTYPE_GET(ret); + p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret); + + init_link_config(&p->link_cfg, ntohs(c.u.info.pcap)); + j++; + } + return 0; +} diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h new file mode 100644 index 000000000000..025623285c93 --- /dev/null +++ b/drivers/net/cxgb4/t4_hw.h @@ -0,0 +1,100 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __T4_HW_H +#define __T4_HW_H + +#include + +enum { + NCHAN = 4, /* # of HW channels */ + MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */ + EEPROMSIZE = 17408, /* Serial EEPROM physical size */ + EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */ + RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */ + TCB_SIZE = 128, /* TCB size */ + NMTUS = 16, /* size of MTU table */ + NCCTRL_WIN = 32, /* # of congestion control windows */ + NEXACT_MAC = 336, /* # of exact MAC address filters */ + L2T_SIZE = 4096, /* # of L2T entries */ + MBOX_LEN = 64, /* mailbox size in bytes */ + TRACE_LEN = 112, /* length of trace data and mask */ + FILTER_OPT_LEN = 36, /* filter tuple width for optional components */ + NWOL_PAT = 8, /* # of WoL patterns */ + WOL_PAT_LEN = 128, /* length of WoL patterns */ +}; + +enum { + SF_PAGE_SIZE = 256, /* serial flash page size */ + SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ + SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */ +}; + +enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */ + +enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */ + +enum { + SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ + SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ + SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ +}; + +struct sge_qstat { /* data written to SGE queue status entries */ + __be32 qid; + __be16 cidx; + __be16 pidx; +}; + +/* + * Structure for last 128 bits of response descriptors + */ +struct rsp_ctrl { + __be32 hdrbuflen_pidx; + __be32 pldbuflen_qid; + union { + u8 type_gen; + __be64 last_flit; + }; +}; + +#define RSPD_NEWBUF 0x80000000U +#define RSPD_LEN 0x7fffffffU + +#define RSPD_GEN(x) ((x) >> 7) +#define RSPD_TYPE(x) (((x) >> 4) & 3) + +#define QINTR_CNT_EN 0x1 +#define QINTR_TIMER_IDX(x) ((x) << 1) +#endif /* __T4_HW_H */ -- cgit v1.2.3 From fd3a47900b6f9fa72a4074ecb630f9dae62f1a95 Mon Sep 17 00:00:00 2001 From: Dimitris Michailidis Date: Thu, 1 Apr 2010 15:28:24 +0000 Subject: cxgb4: Add packet queues and packet DMA code Signed-off-by: Dimitris Michailidis Signed-off-by: David S. Miller --- drivers/net/cxgb4/sge.c | 2431 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 2431 insertions(+) create mode 100644 drivers/net/cxgb4/sge.c diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c new file mode 100644 index 000000000000..14adc58e71c3 --- /dev/null +++ b/drivers/net/cxgb4/sge.c @@ -0,0 +1,2431 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4_msg.h" +#include "t4fw_api.h" + +/* + * Rx buffer size. We use largish buffers if possible but settle for single + * pages under memory shortage. + */ +#if PAGE_SHIFT >= 16 +# define FL_PG_ORDER 0 +#else +# define FL_PG_ORDER (16 - PAGE_SHIFT) +#endif + +/* RX_PULL_LEN should be <= RX_COPY_THRES */ +#define RX_COPY_THRES 256 +#define RX_PULL_LEN 128 + +/* + * Main body length for sk_buffs used for Rx Ethernet packets with fragments. + * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room. + */ +#define RX_PKT_SKB_LEN 512 + +/* Ethernet header padding prepended to RX_PKTs */ +#define RX_PKT_PAD 2 + +/* + * Max number of Tx descriptors we clean up at a time. Should be modest as + * freeing skbs isn't cheap and it happens while holding locks. We just need + * to free packets faster than they arrive, we eventually catch up and keep + * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. + */ +#define MAX_TX_RECLAIM 16 + +/* + * Max number of Rx buffers we replenish at a time. Again keep this modest, + * allocating buffers isn't cheap either. + */ +#define MAX_RX_REFILL 16U + +/* + * Period of the Rx queue check timer. This timer is infrequent as it has + * something to do only when the system experiences severe memory shortage. + */ +#define RX_QCHECK_PERIOD (HZ / 2) + +/* + * Period of the Tx queue check timer. + */ +#define TX_QCHECK_PERIOD (HZ / 2) + +/* + * Max number of Tx descriptors to be reclaimed by the Tx timer. + */ +#define MAX_TIMER_TX_RECLAIM 100 + +/* + * Timer index used when backing off due to memory shortage. + */ +#define NOMEM_TMR_IDX (SGE_NTIMERS - 1) + +/* + * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will + * attempt to refill it. + */ +#define FL_STARVE_THRES 4 + +/* + * Suspend an Ethernet Tx queue with fewer available descriptors than this. + * This is the same as calc_tx_descs() for a TSO packet with + * nr_frags == MAX_SKB_FRAGS. + */ +#define ETHTXQ_STOP_THRES \ + (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8)) + +/* + * Suspension threshold for non-Ethernet Tx queues. We require enough room + * for a full sized WR. + */ +#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc)) + +/* + * Max Tx descriptor space we allow for an Ethernet packet to be inlined + * into a WR. + */ +#define MAX_IMM_TX_PKT_LEN 128 + +/* + * Max size of a WR sent through a control Tx queue. + */ +#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN + +enum { + /* packet alignment in FL buffers */ + FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES, + /* egress status entry size */ + STAT_LEN = L1_CACHE_BYTES > 64 ? 
128 : 64 +}; + +struct tx_sw_desc { /* SW state per Tx descriptor */ + struct sk_buff *skb; + struct ulptx_sgl *sgl; +}; + +struct rx_sw_desc { /* SW state per Rx descriptor */ + struct page *page; + dma_addr_t dma_addr; +}; + +/* + * The low bits of rx_sw_desc.dma_addr have special meaning. + */ +enum { + RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */ + RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ +}; + +static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) +{ + return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); +} + +static inline bool is_buf_mapped(const struct rx_sw_desc *d) +{ + return !(d->dma_addr & RX_UNMAPPED_BUF); +} + +/** + * txq_avail - return the number of available slots in a Tx queue + * @q: the Tx queue + * + * Returns the number of descriptors in a Tx queue available to write new + * packets. + */ +static inline unsigned int txq_avail(const struct sge_txq *q) +{ + return q->size - 1 - q->in_use; +} + +/** + * fl_cap - return the capacity of a free-buffer list + * @fl: the FL + * + * Returns the capacity of a free-buffer list. The capacity is less than + * the size because one descriptor needs to be left unpopulated, otherwise + * HW will think the FL is empty. + */ +static inline unsigned int fl_cap(const struct sge_fl *fl) +{ + return fl->size - 8; /* 1 descriptor = 8 buffers */ +} + +static inline bool fl_starving(const struct sge_fl *fl) +{ + return fl->avail - fl->pend_cred <= FL_STARVE_THRES; +} + +static int map_skb(struct device *dev, const struct sk_buff *skb, + dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto out_err; + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + + for (fp = si->frags; fp < end; fp++) { + *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, *addr)) + goto unwind; + } + return 0; + +unwind: + while (fp-- > si->frags) + dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE); + + dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); +out_err: + return -ENOMEM; +} + +#ifdef CONFIG_NEED_DMA_MAP_STATE +static void unmap_skb(struct device *dev, const struct sk_buff *skb, + const dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE); + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + for (fp = si->frags; fp < end; fp++) + dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE); +} + +/** + * deferred_unmap_destructor - unmap a packet when it is freed + * @skb: the packet + * + * This is the packet destructor used for Tx packets that need to remain + * mapped until they are freed rather than until their Tx descriptors are + * freed. 
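+ *
+ * The caller is expected to have stashed the DMA addresses produced by
+ * map_skb() at skb->head and to have set skb->destructor to this
+ * function before handing the packet to HW.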
+ */ +static void deferred_unmap_destructor(struct sk_buff *skb) +{ + unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head); +} +#endif + +static void unmap_sgl(struct device *dev, const struct sk_buff *skb, + const struct ulptx_sgl *sgl, const struct sge_txq *q) +{ + const struct ulptx_sge_pair *p; + unsigned int nfrags = skb_shinfo(skb)->nr_frags; + + if (likely(skb_headlen(skb))) + dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), + DMA_TO_DEVICE); + else { + dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0), + DMA_TO_DEVICE); + nfrags--; + } + + /* + * the complexity below is because of the possibility of a wrap-around + * in the middle of an SGL + */ + for (p = sgl->sge; nfrags >= 2; nfrags -= 2) { + if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) { +unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + ntohl(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(p->addr[1]), + ntohl(p->len[1]), DMA_TO_DEVICE); + p++; + } else if ((u8 *)p == (u8 *)q->stat) { + p = (const struct ulptx_sge_pair *)q->desc; + goto unmap; + } else if ((u8 *)p + 8 == (u8 *)q->stat) { + const __be64 *addr = (const __be64 *)q->desc; + + dma_unmap_page(dev, be64_to_cpu(addr[0]), + ntohl(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[1]), + ntohl(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[2]; + } else { + const __be64 *addr = (const __be64 *)q->desc; + + dma_unmap_page(dev, be64_to_cpu(p->addr[0]), + ntohl(p->len[0]), DMA_TO_DEVICE); + dma_unmap_page(dev, be64_to_cpu(addr[0]), + ntohl(p->len[1]), DMA_TO_DEVICE); + p = (const struct ulptx_sge_pair *)&addr[1]; + } + } + if (nfrags) { + __be64 addr; + + if ((u8 *)p == (u8 *)q->stat) + p = (const struct ulptx_sge_pair *)q->desc; + addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] : + *(const __be64 *)q->desc; + dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]), + DMA_TO_DEVICE); + } +} + +/** + * free_tx_desc - reclaims Tx descriptors and their buffers + * @adapter: the adapter + * @q: the Tx queue to reclaim descriptors from + * @n: the number of descriptors to reclaim + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims Tx descriptors from an SGE Tx queue and frees the associated + * Tx buffers. Called with the Tx queue lock held. + */ +static void free_tx_desc(struct adapter *adap, struct sge_txq *q, + unsigned int n, bool unmap) +{ + struct tx_sw_desc *d; + unsigned int cidx = q->cidx; + struct device *dev = adap->pdev_dev; + + d = &q->sdesc[cidx]; + while (n--) { + if (d->skb) { /* an SGL is present */ + if (unmap) + unmap_sgl(dev, d->skb, d->sgl, q); + kfree_skb(d->skb); + d->skb = NULL; + } + ++d; + if (++cidx == q->size) { + cidx = 0; + d = q->sdesc; + } + } + q->cidx = cidx; +} + +/* + * Return the number of reclaimable descriptors in a Tx queue. + */ +static inline int reclaimable(const struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + hw_cidx -= q->cidx; + return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; +} + +/** + * reclaim_completed_tx - reclaims completed Tx descriptors + * @adap: the adapter + * @q: the Tx queue to reclaim completed descriptors from + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims Tx descriptors that the SGE has indicated it has processed, + * and frees the associated buffers if possible. Called with the Tx + * queue locked. 
+ */ +static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, + bool unmap) +{ + int avail = reclaimable(q); + + if (avail) { + /* + * Limit the amount of clean up work we do at a time to keep + * the Tx lock hold time O(1). + */ + if (avail > MAX_TX_RECLAIM) + avail = MAX_TX_RECLAIM; + + free_tx_desc(adap, q, avail, unmap); + q->in_use -= avail; + } +} + +static inline int get_buf_size(const struct rx_sw_desc *d) +{ +#if FL_PG_ORDER > 0 + return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) : + PAGE_SIZE; +#else + return PAGE_SIZE; +#endif +} + +/** + * free_rx_bufs - free the Rx buffers on an SGE free list + * @adap: the adapter + * @q: the SGE free list to free buffers from + * @n: how many buffers to free + * + * Release the next @n buffers on an SGE free-buffer Rx queue. The + * buffers must be made inaccessible to HW before calling this function. + */ +static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) +{ + while (n--) { + struct rx_sw_desc *d = &q->sdesc[q->cidx]; + + if (is_buf_mapped(d)) + dma_unmap_page(adap->pdev_dev, get_buf_addr(d), + get_buf_size(d), PCI_DMA_FROMDEVICE); + put_page(d->page); + d->page = NULL; + if (++q->cidx == q->size) + q->cidx = 0; + q->avail--; + } +} + +/** + * unmap_rx_buf - unmap the current Rx buffer on an SGE free list + * @adap: the adapter + * @q: the SGE free list + * + * Unmap the current buffer on an SGE free-buffer Rx queue. The + * buffer must be made inaccessible to HW before calling this function. + * + * This is similar to @free_rx_bufs above but does not free the buffer. + * Do note that the FL still loses any further access to the buffer. + */ +static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) +{ + struct rx_sw_desc *d = &q->sdesc[q->cidx]; + + if (is_buf_mapped(d)) + dma_unmap_page(adap->pdev_dev, get_buf_addr(d), + get_buf_size(d), PCI_DMA_FROMDEVICE); + d->page = NULL; + if (++q->cidx == q->size) + q->cidx = 0; + q->avail--; +} + +static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) +{ + if (q->pend_cred >= 8) { + wmb(); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO | + QID(q->cntxt_id) | PIDX(q->pend_cred / 8)); + q->pend_cred &= 7; + } +} + +static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg, + dma_addr_t mapping) +{ + sd->page = pg; + sd->dma_addr = mapping; /* includes size low bits */ +} + +/** + * refill_fl - refill an SGE Rx buffer ring + * @adap: the adapter + * @q: the ring to refill + * @n: the number of new buffers to allocate + * @gfp: the gfp flags for the allocations + * + * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, + * allocated with the supplied gfp flags. The caller must assure that + * @n does not exceed the queue's capacity. If afterwards the queue is + * found critically low mark it as starving in the bitmap of starving FLs. + * + * Returns the number of buffers allocated. 
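+ * + * When FL_PG_ORDER is non-zero, large pages are tried first and single + * pages serve as the fallback; the RX_LARGE_BUF flag kept in the low bits + * of the stored DMA address records which size was used so the buffer can + * be unmapped correctly later.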
+ */ +static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, + gfp_t gfp) +{ + struct page *pg; + dma_addr_t mapping; + unsigned int cred = q->avail; + __be64 *d = &q->desc[q->pidx]; + struct rx_sw_desc *sd = &q->sdesc[q->pidx]; + + gfp |= __GFP_NOWARN; /* failures are expected */ + +#if FL_PG_ORDER > 0 + /* + * Prefer large buffers + */ + while (n) { + pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER); + if (unlikely(!pg)) { + q->large_alloc_failed++; + break; /* fall back to single pages */ + } + + mapping = dma_map_page(adap->pdev_dev, pg, 0, + PAGE_SIZE << FL_PG_ORDER, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { + __free_pages(pg, FL_PG_ORDER); + goto out; /* do not try small pages for this error */ + } + mapping |= RX_LARGE_BUF; + *d++ = cpu_to_be64(mapping); + + set_rx_sw_desc(sd, pg, mapping); + sd++; + + q->avail++; + if (++q->pidx == q->size) { + q->pidx = 0; + sd = q->sdesc; + d = q->desc; + } + n--; + } +#endif + + while (n--) { + pg = __netdev_alloc_page(adap->port[0], gfp); + if (unlikely(!pg)) { + q->alloc_failed++; + break; + } + + mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { + netdev_free_page(adap->port[0], pg); + goto out; + } + *d++ = cpu_to_be64(mapping); + + set_rx_sw_desc(sd, pg, mapping); + sd++; + + q->avail++; + if (++q->pidx == q->size) { + q->pidx = 0; + sd = q->sdesc; + d = q->desc; + } + } + +out: cred = q->avail - cred; + q->pend_cred += cred; + ring_fl_db(adap, q); + + if (unlikely(fl_starving(q))) { + smp_wmb(); + set_bit(q->cntxt_id, adap->sge.starving_fl); + } + + return cred; +} + +static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) +{ + refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), + GFP_ATOMIC); +} + +/** + * alloc_ring - allocate resources for an SGE descriptor ring + * @dev: the PCI device's core device + * @nelem: the number of descriptors + * @elem_size: the size of each descriptor + * @sw_size: the size of the SW state associated with each ring element + * @phys: the physical address of the allocated ring + * @metadata: address of the array holding the SW state for the ring + * @stat_size: extra space in HW ring for status information + * + * Allocates resources for an SGE descriptor ring, such as Tx queues, + * free buffer lists, or response queues. Each SGE ring requires + * space for its HW descriptors plus, optionally, space for the SW state + * associated with each HW entry (the metadata). The function returns + * three values: the virtual address for the HW ring (the return value + * of the function), the bus address of the HW ring, and the address + * of the SW ring. + */ +static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, + size_t sw_size, dma_addr_t *phys, void *metadata, + size_t stat_size) +{ + size_t len = nelem * elem_size + stat_size; + void *s = NULL; + void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL); + + if (!p) + return NULL; + if (sw_size) { + s = kcalloc(nelem, sw_size, GFP_KERNEL); + + if (!s) { + dma_free_coherent(dev, len, p, *phys); + return NULL; + } + } + if (metadata) + *(void **)metadata = s; + memset(p, 0, len); + return p; +} + +/** + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. 
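+ * The first SGE takes 2 flits, each following pair of SGEs takes 3 flits, + * and a final unpaired SGE is padded to 2 flits; e.g. n = 4 gives + * 2 + 3 + 2 = 7 flits.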
+ */ +static inline unsigned int sgl_len(unsigned int n) +{ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/** + * flits_to_desc - returns the num of Tx descriptors for the given flits + * @n: the number of flits + * + * Returns the number of Tx descriptors needed for the supplied number + * of flits. + */ +static inline unsigned int flits_to_desc(unsigned int n) +{ + BUG_ON(n > SGE_MAX_WR_LEN / 8); + return DIV_ROUND_UP(n, 8); +} + +/** + * is_eth_imm - can an Ethernet packet be sent as immediate data? + * @skb: the packet + * + * Returns whether an Ethernet packet is small enough to fit as + * immediate data. + */ +static inline int is_eth_imm(const struct sk_buff *skb) +{ + return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt); +} + +/** + * calc_tx_flits - calculate the number of flits for a packet Tx WR + * @skb: the packet + * + * Returns the number of flits needed for a Tx WR for the given Ethernet + * packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_flits(const struct sk_buff *skb) +{ + unsigned int flits; + + if (is_eth_imm(skb)) + return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8); + + flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4; + if (skb_shinfo(skb)->gso_size) + flits += 2; + return flits; +} + +/** + * calc_tx_descs - calculate the number of Tx descriptors for a packet + * @skb: the packet + * + * Returns the number of Tx descriptors needed for the given Ethernet + * packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_descs(const struct sk_buff *skb) +{ + return flits_to_desc(calc_tx_flits(skb)); +} + +/** + * write_sgl - populate a scatter/gather list for a packet + * @skb: the packet + * @q: the Tx queue we are writing into + * @sgl: starting location for writing the SGL + * @end: points right after the end of the SGL + * @start: start offset into skb main-body data to include in the SGL + * @addr: the list of bus addresses for the SGL elements + * + * Generates a gather list for the buffers that make up a packet. + * The caller must provide adequate space for the SGL that will be written. + * The SGL includes all of the packet's page fragments and the data in its + * main body except for the first @start bytes. @sgl must be 16-byte + * aligned and within a Tx descriptor with available space. @end points + * right after the end of the SGL but does not account for any potential + * wrap around, i.e., @end > @sgl. + */ +static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, + struct ulptx_sgl *sgl, u64 *end, unsigned int start, + const dma_addr_t *addr) +{ + unsigned int i, len; + struct ulptx_sge_pair *to; + const struct skb_shared_info *si = skb_shinfo(skb); + unsigned int nfrags = si->nr_frags; + struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1]; + + len = skb_headlen(skb) - start; + if (likely(len)) { + sgl->len0 = htonl(len); + sgl->addr0 = cpu_to_be64(addr[0] + start); + nfrags++; + } else { + sgl->len0 = htonl(si->frags[0].size); + sgl->addr0 = cpu_to_be64(addr[1]); + } + + sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags)); + if (likely(--nfrags == 0)) + return; + /* + * Most of the complexity below deals with the possibility we hit the + * end of the queue in the middle of writing the SGL. For this case + * only we create the SGL in a temporary buffer and then copy it. + */ + to = (u8 *)end > (u8 *)q->stat ? 
buf : sgl->sge; + + for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { + to->len[0] = cpu_to_be32(si->frags[i].size); + to->len[1] = cpu_to_be32(si->frags[++i].size); + to->addr[0] = cpu_to_be64(addr[i]); + to->addr[1] = cpu_to_be64(addr[++i]); + } + if (nfrags) { + to->len[0] = cpu_to_be32(si->frags[i].size); + to->len[1] = cpu_to_be32(0); + to->addr[0] = cpu_to_be64(addr[i + 1]); + } + if (unlikely((u8 *)end > (u8 *)q->stat)) { + unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; + + if (likely(part0)) + memcpy(sgl->sge, buf, part0); + part1 = (u8 *)end - (u8 *)q->stat; + memcpy(q->desc, (u8 *)buf + part0, part1); + end = (void *)q->desc + part1; + } + if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ + *(u64 *)end = 0; +} + +/** + * ring_tx_db - check and potentially ring a Tx queue's doorbell + * @adap: the adapter + * @q: the Tx queue + * @n: number of new descriptors to give to HW + * + * Ring the doorbell for a Tx queue. + */ +static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) +{ + wmb(); /* write descriptors before telling HW */ + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(q->cntxt_id) | PIDX(n)); +} + +/** + * inline_tx_skb - inline a packet's data into Tx descriptors + * @skb: the packet + * @q: the Tx queue where the packet will be inlined + * @pos: starting position in the Tx queue where to inline the packet + * + * Inline a packet's contents directly into Tx descriptors, starting at + * the given position within the Tx DMA ring. + * Most of the complexity of this operation is dealing with wrap arounds + * in the middle of the packet we want to inline. + */ +static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, + void *pos) +{ + u64 *p; + int left = (void *)q->stat - pos; + + if (likely(skb->len <= left)) { + if (likely(!skb->data_len)) + skb_copy_from_linear_data(skb, pos, skb->len); + else + skb_copy_bits(skb, 0, pos, skb->len); + pos += skb->len; + } else { + skb_copy_bits(skb, 0, pos, left); + skb_copy_bits(skb, left, q->desc, skb->len - left); + pos = (void *)q->desc + (skb->len - left); + } + + /* 0-pad to multiple of 16 */ + p = PTR_ALIGN(pos, 8); + if ((uintptr_t)p & 8) + *p = 0; +} + +/* + * Figure out what HW csum a packet wants and return the appropriate control + * bits.
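+ * Only TCP and UDP carried directly over IPv4 or IPv6 are offloaded; IPv6 + * extension headers are not parsed and unknown protocols simply have L4 + * checksum offload disabled.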
+ */ +static u64 hwcsum(const struct sk_buff *skb) +{ + int csum_type; + const struct iphdr *iph = ip_hdr(skb); + + if (iph->version == 4) { + if (iph->protocol == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP; + else if (iph->protocol == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP; + else { +nocsum: /* + * unknown protocol, disable HW csum + * and hope a bad packet is detected + */ + return TXPKT_L4CSUM_DIS; + } + } else { + /* + * this doesn't work with extension headers + */ + const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; + + if (ip6h->nexthdr == IPPROTO_TCP) + csum_type = TX_CSUM_TCPIP6; + else if (ip6h->nexthdr == IPPROTO_UDP) + csum_type = TX_CSUM_UDPIP6; + else + goto nocsum; + } + + if (likely(csum_type >= TX_CSUM_TCPIP)) + return TXPKT_CSUM_TYPE(csum_type) | + TXPKT_IPHDR_LEN(skb_network_header_len(skb)) | + TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN); + else { + int start = skb_transport_offset(skb); + + return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) | + TXPKT_CSUM_LOC(start + skb->csum_offset); + } +} + +static void eth_txq_stop(struct sge_eth_txq *q) +{ + netif_tx_stop_queue(q->txq); + q->q.stops++; +} + +static inline void txq_advance(struct sge_txq *q, unsigned int n) +{ + q->in_use += n; + q->pidx += n; + if (q->pidx >= q->size) + q->pidx -= q->size; +} + +/** + * t4_eth_xmit - add a packet to an Ethernet Tx queue + * @skb: the packet + * @dev: the egress net device + * + * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. + */ +netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) +{ + u32 wr_mid; + u64 cntrl, *end; + int qidx, credits; + unsigned int flits, ndesc; + struct adapter *adap; + struct sge_eth_txq *q; + const struct port_info *pi; + struct fw_eth_tx_pkt_wr *wr; + struct cpl_tx_pkt_core *cpl; + const struct skb_shared_info *ssi; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; + + /* + * The chip min packet length is 10 octets but play safe and reject + * anything shorter than an Ethernet header. 
+ */ + if (unlikely(skb->len < ETH_HLEN)) { +out_free: dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + pi = netdev_priv(dev); + adap = pi->adapter; + qidx = skb_get_queue_mapping(skb); + q = &adap->sge.ethtxq[qidx + pi->first_qset]; + + reclaim_completed_tx(adap, &q->q, true); + + flits = calc_tx_flits(skb); + ndesc = flits_to_desc(flits); + credits = txq_avail(&q->q) - ndesc; + + if (unlikely(credits < 0)) { + eth_txq_stop(q); + dev_err(adap->pdev_dev, + "%s: Tx ring %u full while queue awake!\n", + dev->name, qidx); + return NETDEV_TX_BUSY; + } + + if (!is_eth_imm(skb) && + unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { + q->mapping_err++; + goto out_free; + } + + wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); + if (unlikely(credits < ETHTXQ_STOP_THRES)) { + eth_txq_stop(q); + wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ; + } + + wr = (void *)&q->q.desc[q->q.pidx]; + wr->equiq_to_len16 = htonl(wr_mid); + wr->r3 = cpu_to_be64(0); + end = (u64 *)wr + flits; + + ssi = skb_shinfo(skb); + if (ssi->gso_size) { + struct cpl_tx_pkt_lso *lso = (void *)wr; + bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; + int l3hdr_len = skb_network_header_len(skb); + int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + + wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN(sizeof(*lso))); + lso->lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE | LSO_LAST_SLICE | + LSO_IPV6(v6) | + LSO_ETHHDR_LEN(eth_xtra_len / 4) | + LSO_IPHDR_LEN(l3hdr_len / 4) | + LSO_TCPHDR_LEN(tcp_hdr(skb)->doff)); + lso->ipid_ofst = htons(0); + lso->mss = htons(ssi->gso_size); + lso->seqno_offset = htonl(0); + lso->len = htonl(skb->len); + cpl = (void *)(lso + 1); + cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN(l3hdr_len) | + TXPKT_ETHHDR_LEN(eth_xtra_len); + q->tso++; + q->tx_cso += ssi->gso_segs; + } else { + int len; + + len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl); + wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN(len)); + cpl = (void *)(wr + 1); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS; + q->tx_cso++; + } else + cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS; + } + + if (vlan_tx_tag_present(skb)) { + q->vlan_ins++; + cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb)); + } + + cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | + TXPKT_INTF(pi->tx_chan) | TXPKT_PF(0)); + cpl->pack = htons(0); + cpl->len = htons(skb->len); + cpl->ctrl1 = cpu_to_be64(cntrl); + + if (is_eth_imm(skb)) { + inline_tx_skb(skb, &q->q, cpl + 1); + dev_kfree_skb(skb); + } else { + int last_desc; + + write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0, + addr); + skb_orphan(skb); + + last_desc = q->q.pidx + ndesc - 1; + if (last_desc >= q->q.size) + last_desc -= q->q.size; + q->q.sdesc[last_desc].skb = skb; + q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); + } + + txq_advance(&q->q, ndesc); + + ring_tx_db(adap, &q->q, ndesc); + return NETDEV_TX_OK; +} + +/** + * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs + * @q: the SGE control Tx queue + * + * This is a variant of reclaim_completed_tx() that is used for Tx queues + * that send only immediate data (presently just the control queues) and + * thus do not have any sk_buffs to release. 
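+ * The consumer index is simply advanced to the HW index read from the + * queue's status entry, as there is no per-descriptor SW state to clean up.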
+ */ +static inline void reclaim_completed_tx_imm(struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + int reclaim = hw_cidx - q->cidx; + + if (reclaim < 0) + reclaim += q->size; + + q->in_use -= reclaim; + q->cidx = hw_cidx; +} + +/** + * is_imm - check whether a packet can be sent as immediate data + * @skb: the packet + * + * Returns true if a packet can be sent as a WR with immediate data. + */ +static inline int is_imm(const struct sk_buff *skb) +{ + return skb->len <= MAX_CTRL_WR_LEN; +} + +/** + * ctrlq_check_stop - check if a control queue is full and should stop + * @q: the queue + * @wr: most recent WR written to the queue + * + * Check if a control queue has become full and should be stopped. + * We clean up control queue descriptors very lazily, only when we are out. + * If the queue is still full after reclaiming any completed descriptors + * we suspend it and have the last WR wake it up. + */ +static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) +{ + reclaim_completed_tx_imm(&q->q); + if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { + wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); + q->q.stops++; + q->full = 1; + } +} + +/** + * ctrl_xmit - send a packet through an SGE control Tx queue + * @q: the control queue + * @skb: the packet + * + * Send a packet through an SGE control Tx queue. Packets sent through + * a control queue must fit entirely as immediate data. + */ +static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) +{ + unsigned int ndesc; + struct fw_wr_hdr *wr; + + if (unlikely(!is_imm(skb))) { + WARN_ON(1); + dev_kfree_skb(skb); + return NET_XMIT_DROP; + } + + ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); + spin_lock(&q->sendq.lock); + + if (unlikely(q->full)) { + skb->priority = ndesc; /* save for restart */ + __skb_queue_tail(&q->sendq, skb); + spin_unlock(&q->sendq.lock); + return NET_XMIT_CN; + } + + wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; + inline_tx_skb(skb, &q->q, wr); + + txq_advance(&q->q, ndesc); + if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) + ctrlq_check_stop(q, wr); + + ring_tx_db(q->adap, &q->q, ndesc); + spin_unlock(&q->sendq.lock); + + kfree_skb(skb); + return NET_XMIT_SUCCESS; +} + +/** + * restart_ctrlq - restart a suspended control queue + * @data: the control queue to restart + * + * Resumes transmission on a suspended Tx control queue. + */ +static void restart_ctrlq(unsigned long data) +{ + struct sk_buff *skb; + unsigned int written = 0; + struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data; + + spin_lock(&q->sendq.lock); + reclaim_completed_tx_imm(&q->q); + BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ + + while ((skb = __skb_dequeue(&q->sendq)) != NULL) { + struct fw_wr_hdr *wr; + unsigned int ndesc = skb->priority; /* previously saved */ + + /* + * Write descriptors and free skbs outside the lock to limit + * wait times. q->full is still set so new skbs will be queued. 
+ */ + spin_unlock(&q->sendq.lock); + + wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; + inline_tx_skb(skb, &q->q, wr); + kfree_skb(skb); + + written += ndesc; + txq_advance(&q->q, ndesc); + if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { + unsigned long old = q->q.stops; + + ctrlq_check_stop(q, wr); + if (q->q.stops != old) { /* suspended anew */ + spin_lock(&q->sendq.lock); + goto ringdb; + } + } + if (written > 16) { + ring_tx_db(q->adap, &q->q, written); + written = 0; + } + spin_lock(&q->sendq.lock); + } + q->full = 0; +ringdb: if (written) + ring_tx_db(q->adap, &q->q, written); + spin_unlock(&q->sendq.lock); +} + +/** + * t4_mgmt_tx - send a management message + * @adap: the adapter + * @skb: the packet containing the management message + * + * Send a management message through control queue 0. + */ +int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) +{ + int ret; + + local_bh_disable(); + ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); + local_bh_enable(); + return ret; +} + +/** + * is_ofld_imm - check whether a packet can be sent as immediate data + * @skb: the packet + * + * Returns true if a packet can be sent as an offload WR with immediate + * data. We currently use the same limit as for Ethernet packets. + */ +static inline int is_ofld_imm(const struct sk_buff *skb) +{ + return skb->len <= MAX_IMM_TX_PKT_LEN; +} + +/** + * calc_tx_flits_ofld - calculate # of flits for an offload packet + * @skb: the packet + * + * Returns the number of flits needed for the given offload packet. + * These packets are already fully constructed and no additional headers + * will be added. + */ +static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) +{ + unsigned int flits, cnt; + + if (is_ofld_imm(skb)) + return DIV_ROUND_UP(skb->len, 8); + + flits = skb_transport_offset(skb) / 8U; /* headers */ + cnt = skb_shinfo(skb)->nr_frags; + if (skb->tail != skb->transport_header) + cnt++; + return flits + sgl_len(cnt); +} + +/** + * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion + * @adap: the adapter + * @q: the queue to stop + * + * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting + * inability to map packets. A periodic timer attempts to restart + * queues so marked. + */ +static void txq_stop_maperr(struct sge_ofld_txq *q) +{ + q->mapping_err++; + q->q.stops++; + set_bit(q->q.cntxt_id, q->adap->sge.txq_maperr); +} + +/** + * ofldtxq_stop - stop an offload Tx queue that has become full + * @q: the queue to stop + * @skb: the packet causing the queue to become full + * + * Stops an offload Tx queue that has become full and modifies the packet + * being written to request a wakeup. + */ +static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) +{ + struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data; + + wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ); + q->q.stops++; + q->full = 1; +} + +/** + * service_ofldq - restart a suspended offload queue + * @q: the offload queue + * + * Services an offload Tx queue by moving packets from its packet queue + * to the HW Tx ring. The function starts and ends with the queue locked. + */ +static void service_ofldq(struct sge_ofld_txq *q) +{ + u64 *pos; + int credits; + struct sk_buff *skb; + unsigned int written = 0; + unsigned int flits, ndesc; + + while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { + /* + * We drop the lock but leave skb on sendq, thus retaining + * exclusive access to the state of the queue. 
+ */ + spin_unlock(&q->sendq.lock); + + reclaim_completed_tx(q->adap, &q->q, false); + + flits = skb->priority; /* previously saved */ + ndesc = flits_to_desc(flits); + credits = txq_avail(&q->q) - ndesc; + BUG_ON(credits < 0); + if (unlikely(credits < TXQ_STOP_THRES)) + ofldtxq_stop(q, skb); + + pos = (u64 *)&q->q.desc[q->q.pidx]; + if (is_ofld_imm(skb)) + inline_tx_skb(skb, &q->q, pos); + else if (map_skb(q->adap->pdev_dev, skb, + (dma_addr_t *)skb->head)) { + txq_stop_maperr(q); + spin_lock(&q->sendq.lock); + break; + } else { + int last_desc, hdr_len = skb_transport_offset(skb); + + memcpy(pos, skb->data, hdr_len); + write_sgl(skb, &q->q, (void *)pos + hdr_len, + pos + flits, hdr_len, + (dma_addr_t *)skb->head); +#ifdef CONFIG_NEED_DMA_MAP_STATE + skb->dev = q->adap->port[0]; + skb->destructor = deferred_unmap_destructor; +#endif + last_desc = q->q.pidx + ndesc - 1; + if (last_desc >= q->q.size) + last_desc -= q->q.size; + q->q.sdesc[last_desc].skb = skb; + } + + txq_advance(&q->q, ndesc); + written += ndesc; + if (unlikely(written > 32)) { + ring_tx_db(q->adap, &q->q, written); + written = 0; + } + + spin_lock(&q->sendq.lock); + __skb_unlink(skb, &q->sendq); + if (is_ofld_imm(skb)) + kfree_skb(skb); + } + if (likely(written)) + ring_tx_db(q->adap, &q->q, written); +} + +/** + * ofld_xmit - send a packet through an offload queue + * @q: the Tx offload queue + * @skb: the packet + * + * Send an offload packet through an SGE offload queue. + */ +static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb) +{ + skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ + spin_lock(&q->sendq.lock); + __skb_queue_tail(&q->sendq, skb); + if (q->sendq.qlen == 1) + service_ofldq(q); + spin_unlock(&q->sendq.lock); + return NET_XMIT_SUCCESS; +} + +/** + * restart_ofldq - restart a suspended offload queue + * @data: the offload queue to restart + * + * Resumes transmission on a suspended Tx offload queue. + */ +static void restart_ofldq(unsigned long data) +{ + struct sge_ofld_txq *q = (struct sge_ofld_txq *)data; + + spin_lock(&q->sendq.lock); + q->full = 0; /* the queue actually is completely empty now */ + service_ofldq(q); + spin_unlock(&q->sendq.lock); +} + +/** + * skb_txq - return the Tx queue an offload packet should use + * @skb: the packet + * + * Returns the Tx queue an offload packet should use as indicated by bits + * 1-15 in the packet's queue_mapping. + */ +static inline unsigned int skb_txq(const struct sk_buff *skb) +{ + return skb->queue_mapping >> 1; +} + +/** + * is_ctrl_pkt - return whether an offload packet is a control packet + * @skb: the packet + * + * Returns whether an offload packet should use an OFLD or a CTRL + * Tx queue as indicated by bit 0 in the packet's queue_mapping. + */ +static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb) +{ + return skb->queue_mapping & 1; +} + +static inline int ofld_send(struct adapter *adap, struct sk_buff *skb) +{ + unsigned int idx = skb_txq(skb); + + if (unlikely(is_ctrl_pkt(skb))) + return ctrl_xmit(&adap->sge.ctrlq[idx], skb); + return ofld_xmit(&adap->sge.ofldtxq[idx], skb); +} + +/** + * t4_ofld_send - send an offload packet + * @adap: the adapter + * @skb: the packet + * + * Sends an offload packet. We use the packet queue_mapping to select the + * appropriate Tx queue as follows: bit 0 indicates whether the packet + * should be sent as regular or control, bits 1-15 select the queue. 
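+ * For example, a queue_mapping of (2 << 1) selects offload Tx queue 2, + * while (2 << 1) | 1 selects control queue 2.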
+ */ +int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) +{ + int ret; + + local_bh_disable(); + ret = ofld_send(adap, skb); + local_bh_enable(); + return ret; +} + +/** + * cxgb4_ofld_send - send an offload packet + * @dev: the net device + * @skb: the packet + * + * Sends an offload packet. This is an exported version of @t4_ofld_send, + * intended for ULDs. + */ +int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb) +{ + return t4_ofld_send(netdev2adap(dev), skb); +} +EXPORT_SYMBOL(cxgb4_ofld_send); + +static inline void copy_frags(struct skb_shared_info *ssi, + const struct pkt_gl *gl, unsigned int offset) +{ + unsigned int n; + + /* usually there's just one frag */ + ssi->frags[0].page = gl->frags[0].page; + ssi->frags[0].page_offset = gl->frags[0].page_offset + offset; + ssi->frags[0].size = gl->frags[0].size - offset; + ssi->nr_frags = gl->nfrags; + n = gl->nfrags - 1; + if (n) + memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t)); + + /* get a reference to the last page, we don't own it */ + get_page(gl->frags[n].page); +} + +/** + * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list + * @gl: the gather list + * @skb_len: size of sk_buff main body if it carries fragments + * @pull_len: amount of data to move to the sk_buff's main body + * + * Builds an sk_buff from the given packet gather list. Returns the + * sk_buff or %NULL if sk_buff allocation failed. + */ +struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, + unsigned int skb_len, unsigned int pull_len) +{ + struct sk_buff *skb; + + /* + * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer + * size, which is expected since buffers are at least PAGE_SIZEd. + * In this case packets up to RX_COPY_THRES have only one fragment. + */ + if (gl->tot_len <= RX_COPY_THRES) { + skb = dev_alloc_skb(gl->tot_len); + if (unlikely(!skb)) + goto out; + __skb_put(skb, gl->tot_len); + skb_copy_to_linear_data(skb, gl->va, gl->tot_len); + } else { + skb = dev_alloc_skb(skb_len); + if (unlikely(!skb)) + goto out; + __skb_put(skb, pull_len); + skb_copy_to_linear_data(skb, gl->va, pull_len); + + copy_frags(skb_shinfo(skb), gl, pull_len); + skb->len = gl->tot_len; + skb->data_len = skb->len - pull_len; + skb->truesize += skb->data_len; + } +out: return skb; +} +EXPORT_SYMBOL(cxgb4_pktgl_to_skb); + +/** + * t4_pktgl_free - free a packet gather list + * @gl: the gather list + * + * Releases the pages of a packet gather list. We do not own the last + * page on the list and do not free it. + */ +void t4_pktgl_free(const struct pkt_gl *gl) +{ + int n; + const skb_frag_t *p; + + for (p = gl->frags, n = gl->nfrags - 1; n--; p++) + put_page(p->page); +} + +/* + * Process an MPS trace packet. Give it an unused protocol number so it won't + * be delivered to anyone and send it to the stack for capture. 
+ */ +static noinline int handle_trace_pkt(struct adapter *adap, + const struct pkt_gl *gl) +{ + struct sk_buff *skb; + struct cpl_trace_pkt *p; + + skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); + if (unlikely(!skb)) { + t4_pktgl_free(gl); + return 0; + } + + p = (struct cpl_trace_pkt *)skb->data; + __skb_pull(skb, sizeof(*p)); + skb_reset_mac_header(skb); + skb->protocol = htons(0xffff); + skb->dev = adap->port[0]; + netif_receive_skb(skb); + return 0; +} + +static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, + const struct cpl_rx_pkt *pkt) +{ + int ret; + struct sk_buff *skb; + + skb = napi_get_frags(&rxq->rspq.napi); + if (unlikely(!skb)) { + t4_pktgl_free(gl); + rxq->stats.rx_drops++; + return; + } + + copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD); + skb->len = gl->tot_len - RX_PKT_PAD; + skb->data_len = skb->len; + skb->truesize += skb->data_len; + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_record_rx_queue(skb, rxq->rspq.idx); + + if (unlikely(pkt->vlan_ex)) { + struct port_info *pi = netdev_priv(rxq->rspq.netdev); + struct vlan_group *grp = pi->vlan_grp; + + rxq->stats.vlan_ex++; + if (likely(grp)) { + ret = vlan_gro_frags(&rxq->rspq.napi, grp, + ntohs(pkt->vlan)); + goto stats; + } + } + ret = napi_gro_frags(&rxq->rspq.napi); +stats: if (ret == GRO_HELD) + rxq->stats.lro_pkts++; + else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE) + rxq->stats.lro_merged++; + rxq->stats.pkts++; + rxq->stats.rx_cso++; +} + +/** + * t4_ethrx_handler - process an ingress ethernet packet + * @q: the response queue that received the packet + * @rsp: the response queue descriptor holding the RX_PKT message + * @si: the gather list of packet fragments + * + * Process an ingress ethernet packet and deliver it to the stack. + */ +int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *si) +{ + bool csum_ok; + struct sk_buff *skb; + struct port_info *pi; + const struct cpl_rx_pkt *pkt; + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + + if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT)) + return handle_trace_pkt(q->adap, si); + + pkt = (void *)&rsp[1]; + csum_ok = pkt->csum_calc && !pkt->err_vec; + if ((pkt->l2info & htonl(RXF_TCP)) && + (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { + do_gro(rxq, si, pkt); + return 0; + } + + skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN); + if (unlikely(!skb)) { + t4_pktgl_free(si); + rxq->stats.rx_drops++; + return 0; + } + + __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */ + skb->protocol = eth_type_trans(skb, q->netdev); + skb_record_rx_queue(skb, q->idx); + pi = netdev_priv(skb->dev); + rxq->stats.pkts++; + + if (csum_ok && (pi->rx_offload & RX_CSO) && + (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) { + if (!pkt->ip_frag) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else { + __sum16 c = (__force __sum16)pkt->csum; + skb->csum = csum_unfold(c); + skb->ip_summed = CHECKSUM_COMPLETE; + } + rxq->stats.rx_cso++; + } else + skb->ip_summed = CHECKSUM_NONE; + + if (unlikely(pkt->vlan_ex)) { + struct vlan_group *grp = pi->vlan_grp; + + rxq->stats.vlan_ex++; + if (likely(grp)) + vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan)); + else + dev_kfree_skb_any(skb); + } else + netif_receive_skb(skb); + + return 0; +} + +/** + * restore_rx_bufs - put back a packet's Rx buffers + * @si: the packet gather list + * @q: the SGE free list + * @frags: number of FL buffers to restore + * + * Puts back on an FL the Rx buffers associated with @si. 
The buffers + * have already been unmapped and are left unmapped, we mark them so to + * prevent further unmapping attempts. + * + * This function undoes a series of @unmap_rx_buf calls when we find out + * that the current packet can't be processed right away after all and we + * need to come back to it later. This is a very rare event and there's + * no effort to make this particularly efficient. + */ +static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, + int frags) +{ + struct rx_sw_desc *d; + + while (frags--) { + if (q->cidx == 0) + q->cidx = q->size - 1; + else + q->cidx--; + d = &q->sdesc[q->cidx]; + d->page = si->frags[frags].page; + d->dma_addr |= RX_UNMAPPED_BUF; + q->avail++; + } +} + +/** + * is_new_response - check if a response is newly written + * @r: the response descriptor + * @q: the response queue + * + * Returns true if a response descriptor contains a yet unprocessed + * response. + */ +static inline bool is_new_response(const struct rsp_ctrl *r, + const struct sge_rspq *q) +{ + return RSPD_GEN(r->type_gen) == q->gen; +} + +/** + * rspq_next - advance to the next entry in a response queue + * @q: the queue + * + * Updates the state of a response queue to advance it to the next entry. + */ +static inline void rspq_next(struct sge_rspq *q) +{ + q->cur_desc = (void *)q->cur_desc + q->iqe_len; + if (unlikely(++q->cidx == q->size)) { + q->cidx = 0; + q->gen ^= 1; + q->cur_desc = q->desc; + } +} + +/** + * process_responses - process responses from an SGE response queue + * @q: the ingress queue to process + * @budget: how many responses can be processed in this round + * + * Process responses from an SGE response queue up to the supplied budget. + * Responses include received packets as well as control messages from FW + * or HW. + * + * Additionally choose the interrupt holdoff time for the next interrupt + * on this queue. If the system is under memory shortage use a fairly + * long delay to help recovery. + */ +static int process_responses(struct sge_rspq *q, int budget) +{ + int ret, rsp_type; + int budget_left = budget; + const struct rsp_ctrl *rc; + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + + while (likely(budget_left)) { + rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); + if (!is_new_response(rc, q)) + break; + + rmb(); + rsp_type = RSPD_TYPE(rc->type_gen); + if (likely(rsp_type == RSP_TYPE_FLBUF)) { + skb_frag_t *fp; + struct pkt_gl si; + const struct rx_sw_desc *rsd; + u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; + + if (len & RSPD_NEWBUF) { + if (likely(q->offset > 0)) { + free_rx_bufs(q->adap, &rxq->fl, 1); + q->offset = 0; + } + len &= RSPD_LEN; + } + si.tot_len = len; + + /* gather packet fragments */ + for (frags = 0, fp = si.frags; ; frags++, fp++) { + rsd = &rxq->fl.sdesc[rxq->fl.cidx]; + bufsz = get_buf_size(rsd); + fp->page = rsd->page; + fp->page_offset = q->offset; + fp->size = min(bufsz, len); + len -= fp->size; + if (!len) + break; + unmap_rx_buf(q->adap, &rxq->fl); + } + + /* + * Last buffer remains mapped so explicitly make it + * coherent for CPU access.
+ */ + dma_sync_single_for_cpu(q->adap->pdev_dev, + get_buf_addr(rsd), + fp->size, DMA_FROM_DEVICE); + + si.va = page_address(si.frags[0].page) + + si.frags[0].page_offset; + prefetch(si.va); + + si.nfrags = frags + 1; + ret = q->handler(q, q->cur_desc, &si); + if (likely(ret == 0)) + q->offset += ALIGN(fp->size, FL_ALIGN); + else + restore_rx_bufs(&si, &rxq->fl, frags); + } else if (likely(rsp_type == RSP_TYPE_CPL)) { + ret = q->handler(q, q->cur_desc, NULL); + } else { + ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); + } + + if (unlikely(ret)) { + /* couldn't process descriptor, back off for recovery */ + q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX); + break; + } + + rspq_next(q); + budget_left--; + } + + if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16) + __refill_fl(q->adap, &rxq->fl); + return budget - budget_left; +} + +/** + * napi_rx_handler - the NAPI handler for Rx processing + * @napi: the napi instance + * @budget: how many packets we can process in this round + * + * Handler for new data events when using NAPI. This does not need any + * locking or protection from interrupts as data interrupts are off at + * this point and other adapter interrupts do not interfere (the latter + * are not a concern at all with MSI-X as non-data interrupts then have + * a separate handler). + */ +static int napi_rx_handler(struct napi_struct *napi, int budget) +{ + unsigned int params; + struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); + int work_done = process_responses(q, budget); + + if (likely(work_done < budget)) { + napi_complete(napi); + params = q->next_intr_params; + q->next_intr_params = q->intr_params; + } else + params = QINTR_TIMER_IDX(7); + + t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) | + INGRESSQID((u32)q->cntxt_id) | SEINTARM(params)); + return work_done; +} + +/* + * The MSI-X interrupt handler for an SGE response queue. + */ +irqreturn_t t4_sge_intr_msix(int irq, void *cookie) +{ + struct sge_rspq *q = cookie; + + napi_schedule(&q->napi); + return IRQ_HANDLED; +} + +/* + * Process the indirect interrupt entries in the interrupt queue and kick off + * NAPI for each queue that has generated an entry. + */ +static unsigned int process_intrq(struct adapter *adap) +{ + unsigned int credits; + const struct rsp_ctrl *rc; + struct sge_rspq *q = &adap->sge.intrq; + + spin_lock(&adap->sge.intrq_lock); + for (credits = 0; ; credits++) { + rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); + if (!is_new_response(rc, q)) + break; + + rmb(); + if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) { + unsigned int qid = ntohl(rc->pldbuflen_qid); + + napi_schedule(&adap->sge.ingr_map[qid]->napi); + } + + rspq_next(q); + } + + t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) | + INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params)); + spin_unlock(&adap->sge.intrq_lock); + return credits; +} + +/* + * The MSI interrupt handler, which handles data events from SGE response queues + * as well as error and other async events as they all use the same MSI vector. + */ +static irqreturn_t t4_intr_msi(int irq, void *cookie) +{ + struct adapter *adap = cookie; + + t4_slow_intr_handler(adap); + process_intrq(adap); + return IRQ_HANDLED; +} + +/* + * Interrupt handler for legacy INTx interrupts. + * Handles data events from SGE response queues as well as error and other + * async events as they all use the same interrupt line.
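+ * Returns IRQ_NONE when neither the slow interrupt handler nor the + * interrupt queue had any work to do, since the INTx line may be shared.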
+ */ +static irqreturn_t t4_intr_intx(int irq, void *cookie) +{ + struct adapter *adap = cookie; + + t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0); + if (t4_slow_intr_handler(adap) | process_intrq(adap)) + return IRQ_HANDLED; + return IRQ_NONE; /* probably shared interrupt */ +} + +/** + * t4_intr_handler - select the top-level interrupt handler + * @adap: the adapter + * + * Selects the top-level interrupt handler based on the type of interrupts + * (MSI-X, MSI, or INTx). + */ +irq_handler_t t4_intr_handler(struct adapter *adap) +{ + if (adap->flags & USING_MSIX) + return t4_sge_intr_msix; + if (adap->flags & USING_MSI) + return t4_intr_msi; + return t4_intr_intx; +} + +static void sge_rx_timer_cb(unsigned long data) +{ + unsigned long m; + unsigned int i, cnt[2]; + struct adapter *adap = (struct adapter *)data; + struct sge *s = &adap->sge; + + for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) + for (m = s->starving_fl[i]; m; m &= m - 1) { + struct sge_eth_rxq *rxq; + unsigned int id = __ffs(m) + i * BITS_PER_LONG; + struct sge_fl *fl = s->egr_map[id]; + + clear_bit(id, s->starving_fl); + smp_mb__after_clear_bit(); + + if (fl_starving(fl)) { + rxq = container_of(fl, struct sge_eth_rxq, fl); + if (napi_reschedule(&rxq->rspq.napi)) + fl->starving++; + else + set_bit(id, s->starving_fl); + } + } + + t4_write_reg(adap, SGE_DEBUG_INDEX, 13); + cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH); + cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW); + + for (i = 0; i < 2; i++) + if (cnt[i] >= s->starve_thres) { + if (s->idma_state[i] || cnt[i] == 0xffffffff) + continue; + s->idma_state[i] = 1; + t4_write_reg(adap, SGE_DEBUG_INDEX, 11); + m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16); + dev_warn(adap->pdev_dev, + "SGE idma%u starvation detected for " + "queue %lu\n", i, m & 0xffff); + } else if (s->idma_state[i]) + s->idma_state[i] = 0; + + mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); +} + +static void sge_tx_timer_cb(unsigned long data) +{ + unsigned long m; + unsigned int i, budget; + struct adapter *adap = (struct adapter *)data; + struct sge *s = &adap->sge; + + for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++) + for (m = s->txq_maperr[i]; m; m &= m - 1) { + unsigned long id = __ffs(m) + i * BITS_PER_LONG; + struct sge_ofld_txq *txq = s->egr_map[id]; + + clear_bit(id, s->txq_maperr); + tasklet_schedule(&txq->qresume_tsk); + } + + budget = MAX_TIMER_TX_RECLAIM; + i = s->ethtxq_rover; + do { + struct sge_eth_txq *q = &s->ethtxq[i]; + + if (q->q.in_use && + time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && + __netif_tx_trylock(q->txq)) { + int avail = reclaimable(&q->q); + + if (avail) { + if (avail > budget) + avail = budget; + + free_tx_desc(adap, &q->q, avail, true); + q->q.in_use -= avail; + budget -= avail; + } + __netif_tx_unlock(q->txq); + } + + if (++i >= s->ethqsets) + i = 0; + } while (budget && i != s->ethtxq_rover); + s->ethtxq_rover = i; + mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); +} + +int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, + struct net_device *dev, int intr_idx, + struct sge_fl *fl, rspq_handler_t hnd) +{ + int ret, flsz = 0; + struct fw_iq_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Size needs to be multiple of 16, including status entry. 
*/ + iq->size = roundup(iq->size, 16); + + iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, + &iq->phys_addr, NULL, 0); + if (!iq->desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_IQ_CMD_PFN(0) | FW_IQ_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) | + FW_LEN16(c)); + c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | + FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) | + FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) | + FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx : + -intr_idx - 1)); + c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) | + FW_IQ_CMD_IQGTSMODE | + FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) | + FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4)); + c.iqsize = htons(iq->size); + c.iqaddr = cpu_to_be64(iq->phys_addr); + + if (fl) { + fl->size = roundup(fl->size, 8); + fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), + sizeof(struct rx_sw_desc), &fl->addr, + &fl->sdesc, STAT_LEN); + if (!fl->desc) + goto fl_nomem; + + flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc); + c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN | + FW_IQ_CMD_FL0PADEN); + c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) | + FW_IQ_CMD_FL0FBMAX(3)); + c.fl0size = htons(flsz); + c.fl0addr = cpu_to_be64(fl->addr); + } + + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + if (ret) + goto err; + + netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); + iq->cur_desc = iq->desc; + iq->cidx = 0; + iq->gen = 1; + iq->next_intr_params = iq->intr_params; + iq->cntxt_id = ntohs(c.iqid); + iq->abs_id = ntohs(c.physiqid); + iq->size--; /* subtract status entry */ + iq->adap = adap; + iq->netdev = dev; + iq->handler = hnd; + + /* set offset to -1 to distinguish ingress queues without FL */ + iq->offset = fl ? 
0 : -1; + + adap->sge.ingr_map[iq->cntxt_id] = iq; + + if (fl) { + fl->cntxt_id = htons(c.fl0id); + fl->avail = fl->pend_cred = 0; + fl->pidx = fl->cidx = 0; + fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; + adap->sge.egr_map[fl->cntxt_id] = fl; + refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); + } + return 0; + +fl_nomem: + ret = -ENOMEM; +err: + if (iq->desc) { + dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, + iq->desc, iq->phys_addr); + iq->desc = NULL; + } + if (fl && fl->desc) { + kfree(fl->sdesc); + fl->sdesc = NULL; + dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), + fl->desc, fl->addr); + fl->desc = NULL; + } + return ret; +} + +static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) +{ + q->in_use = 0; + q->cidx = q->pidx = 0; + q->stops = q->restarts = 0; + q->stat = (void *)&q->desc[q->size]; + q->cntxt_id = id; + adap->sge.egr_map[id] = q; +} + +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct net_device *dev, struct netdev_queue *netdevq, + unsigned int iqid) +{ + int ret, nentries; + struct fw_eq_eth_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Add status entries */ + nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + + txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, + sizeof(struct tx_desc), sizeof(struct tx_sw_desc), + &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); + if (!txq->q.desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_EQ_ETH_CMD_PFN(0) | FW_EQ_ETH_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC | + FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); + c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid)); + c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | + FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | + FW_EQ_ETH_CMD_IQID(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) | + FW_EQ_ETH_CMD_FBMAX(3) | + FW_EQ_ETH_CMD_CIDXFTHRESH(5) | + FW_EQ_ETH_CMD_EQSIZE(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + if (ret) { + kfree(txq->q.sdesc); + txq->q.sdesc = NULL; + dma_free_coherent(adap->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd))); + txq->txq = netdevq; + txq->tso = txq->tx_cso = txq->vlan_ins = 0; + txq->mapping_err = 0; + return 0; +} + +int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, + struct net_device *dev, unsigned int iqid, + unsigned int cmplqid) +{ + int ret, nentries; + struct fw_eq_ctrl_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Add status entries */ + nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + + txq->q.desc = alloc_ring(adap->pdev_dev, nentries, + sizeof(struct tx_desc), 0, &txq->q.phys_addr, + NULL, 0); + if (!txq->q.desc) + return -ENOMEM; + + c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_EQ_CTRL_CMD_PFN(0) | FW_EQ_CTRL_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC | + FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); + c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid)); + c.physeqid_pkd = htonl(0); + c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) | + FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) | + FW_EQ_CTRL_CMD_IQID(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) | + 
FW_EQ_CTRL_CMD_FBMAX(3) | + FW_EQ_CTRL_CMD_CIDXFTHRESH(5) | + FW_EQ_CTRL_CMD_EQSIZE(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + if (ret) { + dma_free_coherent(adap->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid))); + txq->adap = adap; + skb_queue_head_init(&txq->sendq); + tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq); + txq->full = 0; + return 0; +} + +int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, + struct net_device *dev, unsigned int iqid) +{ + int ret, nentries; + struct fw_eq_ofld_cmd c; + struct port_info *pi = netdev_priv(dev); + + /* Add status entries */ + nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + + txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, + sizeof(struct tx_desc), sizeof(struct tx_sw_desc), + &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); + if (!txq->q.desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | FW_CMD_EXEC | + FW_EQ_OFLD_CMD_PFN(0) | FW_EQ_OFLD_CMD_VFN(0)); + c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC | + FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); + c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) | + FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) | + FW_EQ_OFLD_CMD_IQID(iqid)); + c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) | + FW_EQ_OFLD_CMD_FBMAX(3) | + FW_EQ_OFLD_CMD_CIDXFTHRESH(5) | + FW_EQ_OFLD_CMD_EQSIZE(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + if (ret) { + kfree(txq->q.sdesc); + txq->q.sdesc = NULL; + dma_free_coherent(adap->pdev_dev, + nentries * sizeof(struct tx_desc), + txq->q.desc, txq->q.phys_addr); + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd))); + txq->adap = adap; + skb_queue_head_init(&txq->sendq); + tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq); + txq->full = 0; + txq->mapping_err = 0; + return 0; +} + +static void free_txq(struct adapter *adap, struct sge_txq *q) +{ + dma_free_coherent(adap->pdev_dev, + q->size * sizeof(struct tx_desc) + STAT_LEN, + q->desc, q->phys_addr); + q->cntxt_id = 0; + q->sdesc = NULL; + q->desc = NULL; +} + +static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, + struct sge_fl *fl) +{ + unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; + + adap->sge.ingr_map[rq->cntxt_id] = NULL; + t4_iq_free(adap, 0, 0, 0, FW_IQ_TYPE_FL_INT_CAP, rq->cntxt_id, fl_id, + 0xffff); + dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, + rq->desc, rq->phys_addr); + netif_napi_del(&rq->napi); + rq->netdev = NULL; + rq->cntxt_id = rq->abs_id = 0; + rq->desc = NULL; + + if (fl) { + free_rx_bufs(adap, fl, fl->avail); + dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN, + fl->desc, fl->addr); + kfree(fl->sdesc); + fl->sdesc = NULL; + fl->cntxt_id = 0; + fl->desc = NULL; + } +} + +/** + * t4_free_sge_resources - free SGE resources + * @adap: the adapter + * + * Frees resources used by the SGE queue sets. 
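+ * Ethernet, offload, and control queues are released in turn, and the + * resume tasklets of suspended queues are killed before their rings are + * freed.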
+ */ +void t4_free_sge_resources(struct adapter *adap) +{ + int i; + struct sge_eth_rxq *eq = adap->sge.ethrxq; + struct sge_eth_txq *etq = adap->sge.ethtxq; + struct sge_ofld_rxq *oq = adap->sge.ofldrxq; + + /* clean up Ethernet Tx/Rx queues */ + for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { + if (eq->rspq.desc) + free_rspq_fl(adap, &eq->rspq, &eq->fl); + if (etq->q.desc) { + t4_eth_eq_free(adap, 0, 0, 0, etq->q.cntxt_id); + free_tx_desc(adap, &etq->q, etq->q.in_use, true); + kfree(etq->q.sdesc); + free_txq(adap, &etq->q); + } + } + + /* clean up RDMA and iSCSI Rx queues */ + for (i = 0; i < adap->sge.ofldqsets; i++, oq++) { + if (oq->rspq.desc) + free_rspq_fl(adap, &oq->rspq, &oq->fl); + } + for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) { + if (oq->rspq.desc) + free_rspq_fl(adap, &oq->rspq, &oq->fl); + } + + /* clean up offload Tx queues */ + for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) { + struct sge_ofld_txq *q = &adap->sge.ofldtxq[i]; + + if (q->q.desc) { + tasklet_kill(&q->qresume_tsk); + t4_ofld_eq_free(adap, 0, 0, 0, q->q.cntxt_id); + free_tx_desc(adap, &q->q, q->q.in_use, false); + kfree(q->q.sdesc); + __skb_queue_purge(&q->sendq); + free_txq(adap, &q->q); + } + } + + /* clean up control Tx queues */ + for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { + struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; + + if (cq->q.desc) { + tasklet_kill(&cq->qresume_tsk); + t4_ctrl_eq_free(adap, 0, 0, 0, cq->q.cntxt_id); + __skb_queue_purge(&cq->sendq); + free_txq(adap, &cq->q); + } + } + + if (adap->sge.fw_evtq.desc) + free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); + + if (adap->sge.intrq.desc) + free_rspq_fl(adap, &adap->sge.intrq, NULL); + + /* clear the reverse egress queue map */ + memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map)); +} + +void t4_sge_start(struct adapter *adap) +{ + adap->sge.ethtxq_rover = 0; + mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); + mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); +} + +/** + * t4_sge_stop - disable SGE operation + * @adap: the adapter + * + * Stop tasklets and timers associated with the DMA engine. Note that + * this is effective only if measures have been taken to disable any HW + * events that may restart them. + */ +void t4_sge_stop(struct adapter *adap) +{ + int i; + struct sge *s = &adap->sge; + + if (in_interrupt()) /* actions below require waiting */ + return; + + if (s->rx_timer.function) + del_timer_sync(&s->rx_timer); + if (s->tx_timer.function) + del_timer_sync(&s->tx_timer); + + for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) { + struct sge_ofld_txq *q = &s->ofldtxq[i]; + + if (q->q.desc) + tasklet_kill(&q->qresume_tsk); + } + for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { + struct sge_ctrl_txq *cq = &s->ctrlq[i]; + + if (cq->q.desc) + tasklet_kill(&cq->qresume_tsk); + } +} + +/** + * t4_sge_init - initialize SGE + * @adap: the adapter + * + * Performs SGE initialization needed every time after a chip reset. + * We do not initialize any of the queues here, instead the driver + * top-level must request them individually. + */ +void t4_sge_init(struct adapter *adap) +{ + struct sge *s = &adap->sge; + unsigned int fl_align_log = ilog2(FL_ALIGN); + + t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK | + INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE, + INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) | + RXPKTCPLMODE | + (STAT_LEN == 128 ? 
EGRSTATUSPAGESIZE : 0)); + t4_set_reg_field(adap, SGE_HOST_PAGE_SIZE, HOSTPAGESIZEPF0_MASK, + HOSTPAGESIZEPF0(PAGE_SHIFT - 10)); + t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE); +#if FL_PG_ORDER > 0 + t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER); +#endif + t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD, + THRESHOLD_0(s->counter_val[0]) | + THRESHOLD_1(s->counter_val[1]) | + THRESHOLD_2(s->counter_val[2]) | + THRESHOLD_3(s->counter_val[3])); + t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1, + TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) | + TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1]))); + t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3, + TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) | + TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3]))); + t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5, + TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) | + TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5]))); + setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); + setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); + s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */ + s->idma_state[0] = s->idma_state[1] = 0; + spin_lock_init(&s->intrq_lock); +} -- cgit v1.2.3 From 625ba2c2eed763fad9c3f51318cbe8e1917b9fc8 Mon Sep 17 00:00:00 2001 From: Dimitris Michailidis Date: Thu, 1 Apr 2010 15:28:25 +0000 Subject: cxgb4: Add remaining driver headers and L2T management Signed-off-by: Dimitris Michailidis Signed-off-by: David S. Miller --- drivers/net/cxgb4/cxgb4.h | 741 ++++++++++++++++++++++++++++++++++++++++++ drivers/net/cxgb4/cxgb4_uld.h | 239 ++++++++++++++ drivers/net/cxgb4/l2t.c | 624 +++++++++++++++++++++++++++++++++++ drivers/net/cxgb4/l2t.h | 110 +++++++ 4 files changed, 1714 insertions(+) create mode 100644 drivers/net/cxgb4/cxgb4.h create mode 100644 drivers/net/cxgb4/cxgb4_uld.h create mode 100644 drivers/net/cxgb4/l2t.c create mode 100644 drivers/net/cxgb4/l2t.h diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h new file mode 100644 index 000000000000..3d8ff4889b56 --- /dev/null +++ b/drivers/net/cxgb4/cxgb4.h @@ -0,0 +1,741 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CXGB4_H__ +#define __CXGB4_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cxgb4_uld.h" +#include "t4_hw.h" + +#define FW_VERSION_MAJOR 1 +#define FW_VERSION_MINOR 1 +#define FW_VERSION_MICRO 0 + +enum { + MAX_NPORTS = 4, /* max # of ports */ + SERNUM_LEN = 16, /* Serial # length */ + EC_LEN = 16, /* E/C length */ + ID_LEN = 16, /* ID length */ +}; + +enum { + MEM_EDC0, + MEM_EDC1, + MEM_MC +}; + +enum dev_master { + MASTER_CANT, + MASTER_MAY, + MASTER_MUST +}; + +enum dev_state { + DEV_STATE_UNINIT, + DEV_STATE_INIT, + DEV_STATE_ERR +}; + +enum { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +struct port_stats { + u64 tx_octets; /* total # of octets in good frames */ + u64 tx_frames; /* all good frames */ + u64 tx_bcast_frames; /* all broadcast frames */ + u64 tx_mcast_frames; /* all multicast frames */ + u64 tx_ucast_frames; /* all unicast frames */ + u64 tx_error_frames; /* all error frames */ + + u64 tx_frames_64; /* # of Tx frames in a particular range */ + u64 tx_frames_65_127; + u64 tx_frames_128_255; + u64 tx_frames_256_511; + u64 tx_frames_512_1023; + u64 tx_frames_1024_1518; + u64 tx_frames_1519_max; + + u64 tx_drop; /* # of dropped Tx frames */ + u64 tx_pause; /* # of transmitted pause frames */ + u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */ + u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */ + u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */ + u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */ + u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */ + u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */ + u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */ + u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */ + + u64 rx_octets; /* total # of octets in good frames */ + u64 rx_frames; /* all good frames */ + u64 rx_bcast_frames; /* all broadcast frames */ + u64 rx_mcast_frames; /* all multicast frames */ + u64 rx_ucast_frames; /* all unicast frames */ + u64 rx_too_long; /* # of frames exceeding MTU */ + u64 rx_jabber; /* # of jabber frames */ + u64 rx_fcs_err; /* # of received frames with bad FCS */ + u64 rx_len_err; /* # of received frames with length error */ + u64 rx_symbol_err; /* symbol errors */ + u64 rx_runt; /* # of short frames */ + + u64 rx_frames_64; /* # of Rx frames in a particular range */ + u64 rx_frames_65_127; + u64 rx_frames_128_255; + u64 rx_frames_256_511; + u64 rx_frames_512_1023; + u64 rx_frames_1024_1518; + u64 rx_frames_1519_max; + + u64 rx_pause; /* # of received pause frames */ + u64 rx_ppp0; /* # of received PPP prio 0 frames */ + u64 rx_ppp1; /* # of received PPP prio 1 frames */ + u64 rx_ppp2; /* # of received PPP prio 2 frames */ + u64 rx_ppp3; /* # of received PPP prio 3 frames */ + u64 rx_ppp4; /* # of received PPP prio 4 frames */ + u64 rx_ppp5; /* # of received PPP prio 5 frames */ + u64 rx_ppp6; /* # of received PPP prio 6 frames */ + u64 rx_ppp7; /* # of received PPP prio 7 frames */ + + u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */ + u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */ + u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */ + u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */ + u64 rx_trunc0; /* 
buffer-group 0 truncated packets */ + u64 rx_trunc1; /* buffer-group 1 truncated packets */ + u64 rx_trunc2; /* buffer-group 2 truncated packets */ + u64 rx_trunc3; /* buffer-group 3 truncated packets */ +}; + +struct lb_port_stats { + u64 octets; + u64 frames; + u64 bcast_frames; + u64 mcast_frames; + u64 ucast_frames; + u64 error_frames; + + u64 frames_64; + u64 frames_65_127; + u64 frames_128_255; + u64 frames_256_511; + u64 frames_512_1023; + u64 frames_1024_1518; + u64 frames_1519_max; + + u64 drop; + + u64 ovflow0; + u64 ovflow1; + u64 ovflow2; + u64 ovflow3; + u64 trunc0; + u64 trunc1; + u64 trunc2; + u64 trunc3; +}; + +struct tp_tcp_stats { + u32 tcpOutRsts; + u64 tcpInSegs; + u64 tcpOutSegs; + u64 tcpRetransSegs; +}; + +struct tp_err_stats { + u32 macInErrs[4]; + u32 hdrInErrs[4]; + u32 tcpInErrs[4]; + u32 tnlCongDrops[4]; + u32 ofldChanDrops[4]; + u32 tnlTxDrops[4]; + u32 ofldVlanDrops[4]; + u32 tcp6InErrs[4]; + u32 ofldNoNeigh; + u32 ofldCongDefer; +}; + +struct tp_params { + unsigned int ntxchan; /* # of Tx channels */ + unsigned int tre; /* log2 of core clocks per TP tick */ +}; + +struct vpd_params { + unsigned int cclk; + u8 ec[EC_LEN + 1]; + u8 sn[SERNUM_LEN + 1]; + u8 id[ID_LEN + 1]; +}; + +struct pci_params { + unsigned char speed; + unsigned char width; +}; + +struct adapter_params { + struct tp_params tp; + struct vpd_params vpd; + struct pci_params pci; + + unsigned int fw_vers; + unsigned int tp_vers; + u8 api_vers[7]; + + unsigned short mtus[NMTUS]; + unsigned short a_wnd[NCCTRL_WIN]; + unsigned short b_wnd[NCCTRL_WIN]; + + unsigned char nports; /* # of ethernet ports */ + unsigned char portvec; + unsigned char rev; /* chip revision */ + unsigned char offload; + + unsigned int ofldq_wr_cred; +}; + +struct trace_params { + u32 data[TRACE_LEN / 4]; + u32 mask[TRACE_LEN / 4]; + unsigned short snap_len; + unsigned short min_len; + unsigned char skip_ofst; + unsigned char skip_len; + unsigned char invert; + unsigned char port; +}; + +struct link_config { + unsigned short supported; /* link capabilities */ + unsigned short advertising; /* advertised capabilities */ + unsigned short requested_speed; /* speed user has requested */ + unsigned short speed; /* actual link speed */ + unsigned char requested_fc; /* flow control user has requested */ + unsigned char fc; /* actual link flow control */ + unsigned char autoneg; /* autonegotiating? */ + unsigned char link_ok; /* link up? 
*/ +}; + +#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16) + +enum { + MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */ + MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */ + MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */ + MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */ +}; + +enum { + MAX_EGRQ = 128, /* max # of egress queues, including FLs */ + MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */ +}; + +struct adapter; +struct vlan_group; +struct sge_rspq; + +struct port_info { + struct adapter *adapter; + struct vlan_group *vlan_grp; + u16 viid; + s16 xact_addr_filt; /* index of exact MAC address filter */ + u16 rss_size; /* size of VI's RSS table slice */ + s8 mdio_addr; + u8 port_type; + u8 mod_type; + u8 port_id; + u8 tx_chan; + u8 lport; /* associated offload logical port */ + u8 rx_offload; /* CSO, etc */ + u8 nqsets; /* # of qsets */ + u8 first_qset; /* index of first qset */ + struct link_config link_cfg; +}; + +/* port_info.rx_offload flags */ +enum { + RX_CSO = 1 << 0, +}; + +struct dentry; +struct work_struct; + +enum { /* adapter flags */ + FULL_INIT_DONE = (1 << 0), + USING_MSI = (1 << 1), + USING_MSIX = (1 << 2), + QUEUES_BOUND = (1 << 3), + FW_OK = (1 << 4), +}; + +struct rx_sw_desc; + +struct sge_fl { /* SGE free-buffer queue state */ + unsigned int avail; /* # of available Rx buffers */ + unsigned int pend_cred; /* new buffers since last FL DB ring */ + unsigned int cidx; /* consumer index */ + unsigned int pidx; /* producer index */ + unsigned long alloc_failed; /* # of times buffer allocation failed */ + unsigned long large_alloc_failed; + unsigned long starving; + /* RO fields */ + unsigned int cntxt_id; /* SGE context id for the free list */ + unsigned int size; /* capacity of free list */ + struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ + __be64 *desc; /* address of HW Rx descriptor ring */ + dma_addr_t addr; /* bus address of HW ring start */ +}; + +/* A packet gather list */ +struct pkt_gl { + skb_frag_t frags[MAX_SKB_FRAGS]; + void *va; /* virtual address of first byte */ + unsigned int nfrags; /* # of fragments */ + unsigned int tot_len; /* total length of fragments */ +}; + +typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl); + +struct sge_rspq { /* state for an SGE response queue */ + struct napi_struct napi; + const __be64 *cur_desc; /* current descriptor in queue */ + unsigned int cidx; /* consumer index */ + u8 gen; /* current generation bit */ + u8 intr_params; /* interrupt holdoff parameters */ + u8 next_intr_params; /* holdoff params for next interrupt */ + u8 pktcnt_idx; /* interrupt packet threshold */ + u8 uld; /* ULD handling this queue */ + u8 idx; /* queue index within its group */ + int offset; /* offset into current Rx buffer */ + u16 cntxt_id; /* SGE context id for the response q */ + u16 abs_id; /* absolute SGE id for the response q */ + __be64 *desc; /* address of HW response ring */ + dma_addr_t phys_addr; /* physical address of the ring */ + unsigned int iqe_len; /* entry size */ + unsigned int size; /* capacity of response queue */ + struct adapter *adap; + struct net_device *netdev; /* associated net device */ + rspq_handler_t handler; +}; + +struct sge_eth_stats { /* Ethernet queue statistics */ + unsigned long pkts; /* # of ethernet packets */ + unsigned long lro_pkts; /* # of LRO super packets */ + unsigned long lro_merged; /* # of wire packets merged by LRO */ + unsigned long rx_cso; /* # of Rx checksum offloads */ 
+ unsigned long vlan_ex; /* # of Rx VLAN extractions */ + unsigned long rx_drops; /* # of packets dropped due to no mem */ +}; + +struct sge_eth_rxq { /* SW Ethernet Rx queue */ + struct sge_rspq rspq; + struct sge_fl fl; + struct sge_eth_stats stats; +} ____cacheline_aligned_in_smp; + +struct sge_ofld_stats { /* offload queue statistics */ + unsigned long pkts; /* # of packets */ + unsigned long imm; /* # of immediate-data packets */ + unsigned long an; /* # of asynchronous notifications */ + unsigned long nomem; /* # of responses deferred due to no mem */ +}; + +struct sge_ofld_rxq { /* SW offload Rx queue */ + struct sge_rspq rspq; + struct sge_fl fl; + struct sge_ofld_stats stats; +} ____cacheline_aligned_in_smp; + +struct tx_desc { + __be64 flit[8]; +}; + +struct tx_sw_desc; + +struct sge_txq { + unsigned int in_use; /* # of in-use Tx descriptors */ + unsigned int size; /* # of descriptors */ + unsigned int cidx; /* SW consumer index */ + unsigned int pidx; /* producer index */ + unsigned long stops; /* # of times q has been stopped */ + unsigned long restarts; /* # of queue restarts */ + unsigned int cntxt_id; /* SGE context id for the Tx q */ + struct tx_desc *desc; /* address of HW Tx descriptor ring */ + struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ + struct sge_qstat *stat; /* queue status entry */ + dma_addr_t phys_addr; /* physical address of the ring */ +}; + +struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ + struct sge_txq q; + struct netdev_queue *txq; /* associated netdev TX queue */ + unsigned long tso; /* # of TSO requests */ + unsigned long tx_cso; /* # of Tx checksum offloads */ + unsigned long vlan_ins; /* # of Tx VLAN insertions */ + unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ +} ____cacheline_aligned_in_smp; + +struct sge_ofld_txq { /* state for an SGE offload Tx queue */ + struct sge_txq q; + struct adapter *adap; + struct sk_buff_head sendq; /* list of backpressured packets */ + struct tasklet_struct qresume_tsk; /* restarts the queue */ + u8 full; /* the Tx ring is full */ + unsigned long mapping_err; /* # of I/O MMU packet mapping errors */ +} ____cacheline_aligned_in_smp; + +struct sge_ctrl_txq { /* state for an SGE control Tx queue */ + struct sge_txq q; + struct adapter *adap; + struct sk_buff_head sendq; /* list of backpressured packets */ + struct tasklet_struct qresume_tsk; /* restarts the queue */ + u8 full; /* the Tx ring is full */ +} ____cacheline_aligned_in_smp; + +struct sge { + struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; + struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS]; + struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES]; + + struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; + struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS]; + struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; + struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; + + struct sge_rspq intrq ____cacheline_aligned_in_smp; + spinlock_t intrq_lock; + + u16 max_ethqsets; /* # of available Ethernet queue sets */ + u16 ethqsets; /* # of active Ethernet queue sets */ + u16 ethtxq_rover; /* Tx queue to clean up next */ + u16 ofldqsets; /* # of active offload queue sets */ + u16 rdmaqs; /* # of available RDMA Rx queues */ + u16 ofld_rxq[MAX_OFLD_QSETS]; + u16 rdma_rxq[NCHAN]; + u16 timer_val[SGE_NTIMERS]; + u8 counter_val[SGE_NCOUNTERS]; + unsigned int starve_thres; + u8 idma_state[2]; + void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */ + struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */ + DECLARE_BITMAP(starving_fl, MAX_EGRQ); + 
DECLARE_BITMAP(txq_maperr, MAX_EGRQ); + struct timer_list rx_timer; /* refills starving FLs */ + struct timer_list tx_timer; /* checks Tx queues */ +}; + +#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++) +#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++) +#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++) + +struct l2t_data; + +struct adapter { + void __iomem *regs; + struct pci_dev *pdev; + struct device *pdev_dev; + unsigned long registered_device_map; + unsigned long open_device_map; + unsigned long flags; + + const char *name; + int msg_enable; + + struct adapter_params params; + struct cxgb4_virt_res vres; + unsigned int swintr; + + unsigned int wol; + + struct { + unsigned short vec; + char desc[14]; + } msix_info[MAX_INGQ + 1]; + + struct sge sge; + + struct net_device *port[MAX_NPORTS]; + u8 chan_map[NCHAN]; /* channel -> port map */ + + struct l2t_data *l2t; + void *uld_handle[CXGB4_ULD_MAX]; + struct list_head list_node; + + struct tid_info tids; + void **tid_release_head; + spinlock_t tid_release_lock; + struct work_struct tid_release_task; + bool tid_release_task_busy; + + struct dentry *debugfs_root; + + spinlock_t stats_lock; +}; + +static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) +{ + return readl(adap->regs + reg_addr); +} + +static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val) +{ + writel(val, adap->regs + reg_addr); +} + +#ifndef readq +static inline u64 readq(const volatile void __iomem *addr) +{ + return readl(addr) + ((u64)readl(addr + 4) << 32); +} + +static inline void writeq(u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#endif + +static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr) +{ + return readq(adap->regs + reg_addr); +} + +static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val) +{ + writeq(val, adap->regs + reg_addr); +} + +/** + * netdev2pinfo - return the port_info structure associated with a net_device + * @dev: the netdev + * + * Return the struct port_info associated with a net_device + */ +static inline struct port_info *netdev2pinfo(const struct net_device *dev) +{ + return netdev_priv(dev); +} + +/** + * adap2pinfo - return the port_info of a port + * @adap: the adapter + * @idx: the port index + * + * Return the port_info structure for the port of the given index. 
+ */ +static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) +{ + return netdev_priv(adap->port[idx]); +} + +/** + * netdev2adap - return the adapter structure associated with a net_device + * @dev: the netdev + * + * Return the struct adapter associated with a net_device + */ +static inline struct adapter *netdev2adap(const struct net_device *dev) +{ + return netdev2pinfo(dev)->adapter; +} + +void t4_os_portmod_changed(const struct adapter *adap, int port_id); +void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); + +void *t4_alloc_mem(size_t size); +void t4_free_mem(void *addr); + +void t4_free_sge_resources(struct adapter *adap); +irq_handler_t t4_intr_handler(struct adapter *adap); +netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev); +int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl); +int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb); +int t4_ofld_send(struct adapter *adap, struct sk_buff *skb); +int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, + struct net_device *dev, int intr_idx, + struct sge_fl *fl, rspq_handler_t hnd); +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct net_device *dev, struct netdev_queue *netdevq, + unsigned int iqid); +int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, + struct net_device *dev, unsigned int iqid, + unsigned int cmplqid); +int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, + struct net_device *dev, unsigned int iqid); +irqreturn_t t4_sge_intr_msix(int irq, void *cookie); +void t4_sge_init(struct adapter *adap); +void t4_sge_start(struct adapter *adap); +void t4_sge_stop(struct adapter *adap); + +#define for_each_port(adapter, iter) \ + for (iter = 0; iter < (adapter)->params.nports; ++iter) + +static inline unsigned int core_ticks_per_usec(const struct adapter *adap) +{ + return adap->params.vpd.cclk / 1000; +} + +static inline unsigned int us_to_core_ticks(const struct adapter *adap, + unsigned int us) +{ + return (us * adap->params.vpd.cclk) / 1000; +} + +void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, + u32 val); + +int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, + void *rpl, bool sleep_ok); + +static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true); +} + +static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); +} + +void t4_intr_enable(struct adapter *adapter); +void t4_intr_disable(struct adapter *adapter); +void t4_intr_clear(struct adapter *adapter); +int t4_slow_intr_handler(struct adapter *adapter); + +int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, + struct link_config *lc); +int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); +int t4_seeprom_wp(struct adapter *adapter, bool enable); +int t4_read_flash(struct adapter *adapter, unsigned int addr, + unsigned int nwords, u32 *data, int byte_oriented); +int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); +int t4_check_fw_version(struct adapter *adapter); +int t4_prep_adapter(struct adapter *adapter); +int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); +void t4_fatal_err(struct adapter *adapter); +void 
t4_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on); +int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp, + int filter_index, int enable); +void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp, + int filter_index, int *enabled); +int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, + int start, int n, const u16 *rspq, unsigned int nrspq); +int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, + unsigned int flags); +int t4_read_rss(struct adapter *adapter, u16 *entries); +int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity); +int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, + u64 *parity); + +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); +void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p); + +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); +void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st); +void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6); +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta); + +void t4_wol_magic_enable(struct adapter *adap, unsigned int port, + const u8 *addr); +int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, + u64 mask0, u64 mask1, unsigned int crc, bool enable); + +int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, + enum dev_master master, enum dev_state *state); +int t4_fw_bye(struct adapter *adap, unsigned int mbox); +int t4_early_init(struct adapter *adap, unsigned int mbox); +int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val); +int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val); +int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, + unsigned int rxqi, unsigned int rxq, unsigned int tc, + unsigned int vi, unsigned int cmask, unsigned int pmask, + unsigned int nexact, unsigned int rcaps, unsigned int wxcaps); +int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, + unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, + unsigned int *rss_size); +int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int viid); +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, bool sleep_ok); +int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool free, unsigned int naddr, + const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); +int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, + int idx, const u8 *addr, bool persist, bool add_smt); +int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool ucast, u64 vec, bool sleep_ok); +int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool rx_en, bool tx_en); +int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, + unsigned int nblinks); +int t4_mdio_rd(struct 
adapter *adap, unsigned int mbox, unsigned int phy_addr, + unsigned int mmd, unsigned int reg, u16 *valp); +int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, + unsigned int mmd, unsigned int reg, u16 val); +int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, + unsigned int pf, unsigned int vf, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id); +int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int iqtype, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id); +int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); +int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); +#endif /* __CXGB4_H__ */ diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h new file mode 100644 index 000000000000..5b98546ac92d --- /dev/null +++ b/drivers/net/cxgb4/cxgb4_uld.h @@ -0,0 +1,239 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __CXGB4_OFLD_H +#define __CXGB4_OFLD_H + +#include +#include +#include +#include + +/* CPL message priority levels */ +enum { + CPL_PRIORITY_DATA = 0, /* data messages */ + CPL_PRIORITY_SETUP = 1, /* connection setup messages */ + CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */ + CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */ + CPL_PRIORITY_ACK = 1, /* RX ACK messages */ + CPL_PRIORITY_CONTROL = 1 /* control messages */ +}; + +#define INIT_TP_WR(w, tid) do { \ + (w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \ + FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \ + FW_WR_FLOWID(tid)); \ + (w)->wr.wr_lo = cpu_to_be64(0); \ +} while (0) + +#define INIT_TP_WR_CPL(w, cpl, tid) do { \ + INIT_TP_WR(w, tid); \ + OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \ +} while (0) + +#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \ + (w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \ + (w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \ + FW_WR_FLOWID(tid)); \ + (w)->wr.wr_lo = cpu_to_be64(0); \ +} while (0) + +/* Special asynchronous notification message */ +#define CXGB4_MSG_AN ((void *)1) + +struct serv_entry { + void *data; +}; + +union aopen_entry { + void *data; + union aopen_entry *next; +}; + +/* + * Holds the size, base address, free list start, etc of the TID, server TID, + * and active-open TID tables. The tables themselves are allocated dynamically. + */ +struct tid_info { + void **tid_tab; + unsigned int ntids; + + struct serv_entry *stid_tab; + unsigned long *stid_bmap; + unsigned int nstids; + unsigned int stid_base; + + union aopen_entry *atid_tab; + unsigned int natids; + + unsigned int nftids; + unsigned int ftid_base; + + spinlock_t atid_lock ____cacheline_aligned_in_smp; + union aopen_entry *afree; + unsigned int atids_in_use; + + spinlock_t stid_lock; + unsigned int stids_in_use; + + atomic_t tids_in_use; +}; + +static inline void *lookup_tid(const struct tid_info *t, unsigned int tid) +{ + return tid < t->ntids ? t->tid_tab[tid] : NULL; +} + +static inline void *lookup_atid(const struct tid_info *t, unsigned int atid) +{ + return atid < t->natids ? t->atid_tab[atid].data : NULL; +} + +static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) +{ + stid -= t->stid_base; + return stid < t->nstids ? 
t->stid_tab[stid].data : NULL; +} + +static inline void cxgb4_insert_tid(struct tid_info *t, void *data, + unsigned int tid) +{ + t->tid_tab[tid] = data; + atomic_inc(&t->tids_in_use); +} + +int cxgb4_alloc_atid(struct tid_info *t, void *data); +int cxgb4_alloc_stid(struct tid_info *t, int family, void *data); +void cxgb4_free_atid(struct tid_info *t, unsigned int atid); +void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family); +void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid); +void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, + unsigned int tid); + +struct in6_addr; + +int cxgb4_create_server(const struct net_device *dev, unsigned int stid, + __be32 sip, __be16 sport, unsigned int queue); +int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, + const struct in6_addr *sip, __be16 sport, + unsigned int queue); + +static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) +{ + skb_set_queue_mapping(skb, (queue << 1) | prio); +} + +enum cxgb4_uld { + CXGB4_ULD_RDMA, + CXGB4_ULD_ISCSI, + CXGB4_ULD_MAX +}; + +enum cxgb4_state { + CXGB4_STATE_UP, + CXGB4_STATE_START_RECOVERY, + CXGB4_STATE_DOWN, + CXGB4_STATE_DETACH +}; + +struct pci_dev; +struct l2t_data; +struct net_device; +struct pkt_gl; +struct tp_tcp_stats; + +struct cxgb4_range { + unsigned int start; + unsigned int size; +}; + +struct cxgb4_virt_res { /* virtualized HW resources */ + struct cxgb4_range ddp; + struct cxgb4_range iscsi; + struct cxgb4_range stag; + struct cxgb4_range rq; + struct cxgb4_range pbl; +}; + +/* + * Block of information the LLD provides to ULDs attaching to a device. + */ +struct cxgb4_lld_info { + struct pci_dev *pdev; /* associated PCI device */ + struct l2t_data *l2t; /* L2 table */ + struct tid_info *tids; /* TID table */ + struct net_device **ports; /* device ports */ + const struct cxgb4_virt_res *vr; /* assorted HW resources */ + const unsigned short *mtus; /* MTU table */ + const unsigned short *rxq_ids; /* the ULD's Rx queue ids */ + unsigned short nrxq; /* # of Rx queues */ + unsigned short ntxq; /* # of Tx queues */ + unsigned char nchan:4; /* # of channels */ + unsigned char nports:4; /* # of ports */ + unsigned char wr_cred; /* WR 16-byte credits */ + unsigned char adapter_type; /* type of adapter */ + unsigned char fw_api_ver; /* FW API version */ + unsigned int fw_vers; /* FW version */ + unsigned int iscsi_iolen; /* iSCSI max I/O length */ + unsigned short udb_density; /* # of user DB/page */ + unsigned short ucq_density; /* # of user CQs/page */ + void __iomem *gts_reg; /* address of GTS register */ + void __iomem *db_reg; /* address of kernel doorbell */ +}; + +struct cxgb4_uld_info { + const char *name; + void *(*add)(const struct cxgb4_lld_info *p); + int (*rx_handler)(void *handle, const __be64 *rsp, + const struct pkt_gl *gl); + int (*state_change)(void *handle, enum cxgb4_state new_state); +}; + +int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); +int cxgb4_unregister_uld(enum cxgb4_uld type); +int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); +unsigned int cxgb4_port_chan(const struct net_device *dev); +unsigned int cxgb4_port_viid(const struct net_device *dev); +unsigned int cxgb4_port_idx(const struct net_device *dev); +struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id); +unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, + unsigned int *idx); +void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct 
tp_tcp_stats *v4, + struct tp_tcp_stats *v6); +void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, + const unsigned int *pgsz_order); +struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl, + unsigned int skb_len, unsigned int pull_len); +#endif /* !__CXGB4_OFLD_H */ diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c new file mode 100644 index 000000000000..9f96724a133a --- /dev/null +++ b/drivers/net/cxgb4/l2t.c @@ -0,0 +1,624 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include "cxgb4.h" +#include "l2t.h" +#include "t4_msg.h" +#include "t4fw_api.h" + +#define VLAN_NONE 0xfff + +/* identifies sync vs async L2T_WRITE_REQs */ +#define F_SYNC_WR (1 << 12) + +enum { + L2T_STATE_VALID, /* entry is up to date */ + L2T_STATE_STALE, /* entry may be used but needs revalidation */ + L2T_STATE_RESOLVING, /* entry needs address resolution */ + L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */ + + /* when state is one of the below the entry is not hashed */ + L2T_STATE_SWITCHING, /* entry is being used by a switching filter */ + L2T_STATE_UNUSED /* entry not in use */ +}; + +struct l2t_data { + rwlock_t lock; + atomic_t nfree; /* number of free entries */ + struct l2t_entry *rover; /* starting point for next allocation */ + struct l2t_entry l2tab[L2T_SIZE]; +}; + +static inline unsigned int vlan_prio(const struct l2t_entry *e) +{ + return e->vlan >> 13; +} + +static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) +{ + if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ + atomic_dec(&d->nfree); +} + +/* + * To avoid having to check address families we do not allow v4 and v6 + * neighbors to be on the same hash chain. We keep v4 entries in the first + * half of available hash buckets and v6 in the second. 
+ */ +enum { + L2T_SZ_HALF = L2T_SIZE / 2, + L2T_HASH_MASK = L2T_SZ_HALF - 1 +}; + +static inline unsigned int arp_hash(const u32 *key, int ifindex) +{ + return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK; +} + +static inline unsigned int ipv6_hash(const u32 *key, int ifindex) +{ + u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3]; + + return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK); +} + +static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex) +{ + return addr_len == 4 ? arp_hash(addr, ifindex) : + ipv6_hash(addr, ifindex); +} + +/* + * Checks if an L2T entry is for the given IP/IPv6 address. It does not check + * whether the L2T entry and the address are of the same address family. + * Callers ensure an address is only checked against L2T entries of the same + * family, something made trivial by the separation of IP and IPv6 hash chains + * mentioned above. Returns 0 if there's a match, + */ +static int addreq(const struct l2t_entry *e, const u32 *addr) +{ + if (e->v6) + return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) | + (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]); + return e->addr[0] ^ addr[0]; +} + +static void neigh_replace(struct l2t_entry *e, struct neighbour *n) +{ + neigh_hold(n); + if (e->neigh) + neigh_release(e->neigh); + e->neigh = n; +} + +/* + * Write an L2T entry. Must be called with the entry locked. + * The write may be synchronous or asynchronous. + */ +static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync) +{ + struct sk_buff *skb; + struct cpl_l2t_write_req *req; + + skb = alloc_skb(sizeof(*req), GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, + e->idx | (sync ? F_SYNC_WR : 0) | + TID_QID(adap->sge.fw_evtq.abs_id))); + req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync)); + req->l2t_idx = htons(e->idx); + req->vlan = htons(e->vlan); + if (e->neigh) + memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); + memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); + + set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); + t4_ofld_send(adap, skb); + + if (sync && e->state != L2T_STATE_SWITCHING) + e->state = L2T_STATE_SYNC_WRITE; + return 0; +} + +/* + * Send packets waiting in an L2T entry's ARP queue. Must be called with the + * entry locked. + */ +static void send_pending(struct adapter *adap, struct l2t_entry *e) +{ + while (e->arpq_head) { + struct sk_buff *skb = e->arpq_head; + + e->arpq_head = skb->next; + skb->next = NULL; + t4_ofld_send(adap, skb); + } + e->arpq_tail = NULL; +} + +/* + * Process a CPL_L2T_WRITE_RPL. Wake up the ARP queue if it completes a + * synchronous L2T_WRITE. Note that the TID in the reply is really the L2T + * index it refers to. + */ +void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl) +{ + unsigned int tid = GET_TID(rpl); + unsigned int idx = tid & (L2T_SIZE - 1); + + if (unlikely(rpl->status != CPL_ERR_NONE)) { + dev_err(adap->pdev_dev, + "Unexpected L2T_WRITE_RPL status %u for entry %u\n", + rpl->status, idx); + return; + } + + if (tid & F_SYNC_WR) { + struct l2t_entry *e = &adap->l2t->l2tab[idx]; + + spin_lock(&e->lock); + if (e->state != L2T_STATE_SWITCHING) { + send_pending(adap, e); + e->state = (e->neigh->nud_state & NUD_STALE) ? + L2T_STATE_STALE : L2T_STATE_VALID; + } + spin_unlock(&e->lock); + } +} + +/* + * Add a packet to an L2T entry's queue of packets awaiting resolution. 
+ * Must be called with the entry's lock held. + */ +static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) +{ + skb->next = NULL; + if (e->arpq_head) + e->arpq_tail->next = skb; + else + e->arpq_head = skb; + e->arpq_tail = skb; +} + +int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb, + struct l2t_entry *e) +{ + struct adapter *adap = netdev2adap(dev); + +again: + switch (e->state) { + case L2T_STATE_STALE: /* entry is stale, kick off revalidation */ + neigh_event_send(e->neigh, NULL); + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_STALE) + e->state = L2T_STATE_VALID; + spin_unlock_bh(&e->lock); + case L2T_STATE_VALID: /* fast-path, send the packet on */ + return t4_ofld_send(adap, skb); + case L2T_STATE_RESOLVING: + case L2T_STATE_SYNC_WRITE: + spin_lock_bh(&e->lock); + if (e->state != L2T_STATE_SYNC_WRITE && + e->state != L2T_STATE_RESOLVING) { + spin_unlock_bh(&e->lock); + goto again; + } + arpq_enqueue(e, skb); + spin_unlock_bh(&e->lock); + + if (e->state == L2T_STATE_RESOLVING && + !neigh_event_send(e->neigh, NULL)) { + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_RESOLVING && e->arpq_head) + write_l2e(adap, e, 1); + spin_unlock_bh(&e->lock); + } + } + return 0; +} +EXPORT_SYMBOL(cxgb4_l2t_send); + +/* + * Allocate a free L2T entry. Must be called with l2t_data.lock held. + */ +static struct l2t_entry *alloc_l2e(struct l2t_data *d) +{ + struct l2t_entry *end, *e, **p; + + if (!atomic_read(&d->nfree)) + return NULL; + + /* there's definitely a free entry */ + for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e) + if (atomic_read(&e->refcnt) == 0) + goto found; + + for (e = d->l2tab; atomic_read(&e->refcnt); ++e) + ; +found: + d->rover = e + 1; + atomic_dec(&d->nfree); + + /* + * The entry we found may be an inactive entry that is + * presently in the hash table. We need to remove it. + */ + if (e->state < L2T_STATE_SWITCHING) + for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) + if (*p == e) { + *p = e->next; + e->next = NULL; + break; + } + + e->state = L2T_STATE_UNUSED; + return e; +} + +/* + * Called when an L2T entry has no more users. + */ +static void t4_l2e_free(struct l2t_entry *e) +{ + struct l2t_data *d; + + spin_lock_bh(&e->lock); + if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ + if (e->neigh) { + neigh_release(e->neigh); + e->neigh = NULL; + } + } + spin_unlock_bh(&e->lock); + + d = container_of(e, struct l2t_data, l2tab[e->idx]); + atomic_inc(&d->nfree); +} + +void cxgb4_l2t_release(struct l2t_entry *e) +{ + if (atomic_dec_and_test(&e->refcnt)) + t4_l2e_free(e); +} +EXPORT_SYMBOL(cxgb4_l2t_release); + +/* + * Update an L2T entry that was previously used for the same next hop as neigh. + * Must be called with softirqs disabled. 
+ */ +static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) +{ + unsigned int nud_state; + + spin_lock(&e->lock); /* avoid race with t4_l2t_free */ + if (neigh != e->neigh) + neigh_replace(e, neigh); + nud_state = neigh->nud_state; + if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) || + !(nud_state & NUD_VALID)) + e->state = L2T_STATE_RESOLVING; + else if (nud_state & NUD_CONNECTED) + e->state = L2T_STATE_VALID; + else + e->state = L2T_STATE_STALE; + spin_unlock(&e->lock); +} + +struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, + const struct net_device *physdev, + unsigned int priority) +{ + u8 lport; + u16 vlan; + struct l2t_entry *e; + int addr_len = neigh->tbl->key_len; + u32 *addr = (u32 *)neigh->primary_key; + int ifidx = neigh->dev->ifindex; + int hash = addr_hash(addr, addr_len, ifidx); + + if (neigh->dev->flags & IFF_LOOPBACK) + lport = netdev2pinfo(physdev)->tx_chan + 4; + else + lport = netdev2pinfo(physdev)->lport; + + if (neigh->dev->priv_flags & IFF_802_1Q_VLAN) + vlan = vlan_dev_vlan_id(neigh->dev); + else + vlan = VLAN_NONE; + + write_lock_bh(&d->lock); + for (e = d->l2tab[hash].first; e; e = e->next) + if (!addreq(e, addr) && e->ifindex == ifidx && + e->vlan == vlan && e->lport == lport) { + l2t_hold(d, e); + if (atomic_read(&e->refcnt) == 1) + reuse_entry(e, neigh); + goto done; + } + + /* Need to allocate a new entry */ + e = alloc_l2e(d); + if (e) { + spin_lock(&e->lock); /* avoid race with t4_l2t_free */ + e->state = L2T_STATE_RESOLVING; + memcpy(e->addr, addr, addr_len); + e->ifindex = ifidx; + e->hash = hash; + e->lport = lport; + e->v6 = addr_len == 16; + atomic_set(&e->refcnt, 1); + neigh_replace(e, neigh); + e->vlan = vlan; + e->next = d->l2tab[hash].first; + d->l2tab[hash].first = e; + spin_unlock(&e->lock); + } +done: + write_unlock_bh(&d->lock); + return e; +} +EXPORT_SYMBOL(cxgb4_l2t_get); + +/* + * Called when address resolution fails for an L2T entry to handle packets + * on the arpq head. If a packet specifies a failure handler it is invoked, + * otherwise the packet is sent to the device. + */ +static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq) +{ + while (arpq) { + struct sk_buff *skb = arpq; + const struct l2t_skb_cb *cb = L2T_SKB_CB(skb); + + arpq = skb->next; + skb->next = NULL; + if (cb->arp_err_handler) + cb->arp_err_handler(cb->handle, skb); + else + t4_ofld_send(adap, skb); + } +} + +/* + * Called when the host's neighbor layer makes a change to some entry that is + * loaded into the HW L2 table. + */ +void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) +{ + struct l2t_entry *e; + struct sk_buff *arpq = NULL; + struct l2t_data *d = adap->l2t; + int addr_len = neigh->tbl->key_len; + u32 *addr = (u32 *) neigh->primary_key; + int ifidx = neigh->dev->ifindex; + int hash = addr_hash(addr, addr_len, ifidx); + + read_lock_bh(&d->lock); + for (e = d->l2tab[hash].first; e; e = e->next) + if (!addreq(e, addr) && e->ifindex == ifidx) { + spin_lock(&e->lock); + if (atomic_read(&e->refcnt)) + goto found; + spin_unlock(&e->lock); + break; + } + read_unlock_bh(&d->lock); + return; + + found: + read_unlock(&d->lock); + + if (neigh != e->neigh) + neigh_replace(e, neigh); + + if (e->state == L2T_STATE_RESOLVING) { + if (neigh->nud_state & NUD_FAILED) { + arpq = e->arpq_head; + e->arpq_head = e->arpq_tail = NULL; + } else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) && + e->arpq_head) { + write_l2e(adap, e, 1); + } + } else { + e->state = neigh->nud_state & NUD_CONNECTED ? 
+ L2T_STATE_VALID : L2T_STATE_STALE; + if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac))) + write_l2e(adap, e, 0); + } + + spin_unlock_bh(&e->lock); + + if (arpq) + handle_failed_resolution(adap, arpq); +} + +/* + * Allocate an L2T entry for use by a switching rule. Such entries need to be + * explicitly freed and while busy they are not on any hash chain, so normal + * address resolution updates do not see them. + */ +struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d) +{ + struct l2t_entry *e; + + write_lock_bh(&d->lock); + e = alloc_l2e(d); + if (e) { + spin_lock(&e->lock); /* avoid race with t4_l2t_free */ + e->state = L2T_STATE_SWITCHING; + atomic_set(&e->refcnt, 1); + spin_unlock(&e->lock); + } + write_unlock_bh(&d->lock); + return e; +} + +/* + * Sets/updates the contents of a switching L2T entry that has been allocated + * with an earlier call to @t4_l2t_alloc_switching. + */ +int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, + u8 port, u8 *eth_addr) +{ + e->vlan = vlan; + e->lport = port; + memcpy(e->dmac, eth_addr, ETH_ALEN); + return write_l2e(adap, e, 0); +} + +struct l2t_data *t4_init_l2t(void) +{ + int i; + struct l2t_data *d; + + d = t4_alloc_mem(sizeof(*d)); + if (!d) + return NULL; + + d->rover = d->l2tab; + atomic_set(&d->nfree, L2T_SIZE); + rwlock_init(&d->lock); + + for (i = 0; i < L2T_SIZE; ++i) { + d->l2tab[i].idx = i; + d->l2tab[i].state = L2T_STATE_UNUSED; + spin_lock_init(&d->l2tab[i].lock); + atomic_set(&d->l2tab[i].refcnt, 0); + } + return d; +} + +#include +#include +#include + +static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos) +{ + struct l2t_entry *l2tab = seq->private; + + return pos >= L2T_SIZE ? NULL : &l2tab[pos]; +} + +static void *l2t_seq_start(struct seq_file *seq, loff_t *pos) +{ + return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; +} + +static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + v = l2t_get_idx(seq, *pos); + if (v) + ++*pos; + return v; +} + +static void l2t_seq_stop(struct seq_file *seq, void *v) +{ +} + +static char l2e_state(const struct l2t_entry *e) +{ + switch (e->state) { + case L2T_STATE_VALID: return 'V'; + case L2T_STATE_STALE: return 'S'; + case L2T_STATE_SYNC_WRITE: return 'W'; + case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R'; + case L2T_STATE_SWITCHING: return 'X'; + default: + return 'U'; + } +} + +static int l2t_seq_show(struct seq_file *seq, void *v) +{ + if (v == SEQ_START_TOKEN) + seq_puts(seq, " Idx IP address " + "Ethernet address VLAN/P LP State Users Port\n"); + else { + char ip[60]; + struct l2t_entry *e = v; + + spin_lock_bh(&e->lock); + if (e->state == L2T_STATE_SWITCHING) + ip[0] = '\0'; + else + sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr); + seq_printf(seq, "%4u %-25s %17pM %4d %u %2u %c %5u %s\n", + e->idx, ip, e->dmac, + e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport, + l2e_state(e), atomic_read(&e->refcnt), + e->neigh ? 
e->neigh->dev->name : ""); + spin_unlock_bh(&e->lock); + } + return 0; +} + +static const struct seq_operations l2t_seq_ops = { + .start = l2t_seq_start, + .next = l2t_seq_next, + .stop = l2t_seq_stop, + .show = l2t_seq_show +}; + +static int l2t_seq_open(struct inode *inode, struct file *file) +{ + int rc = seq_open(file, &l2t_seq_ops); + + if (!rc) { + struct adapter *adap = inode->i_private; + struct seq_file *seq = file->private_data; + + seq->private = adap->l2t->l2tab; + } + return rc; +} + +const struct file_operations t4_l2t_fops = { + .owner = THIS_MODULE, + .open = l2t_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h new file mode 100644 index 000000000000..643f27ed3cf4 --- /dev/null +++ b/drivers/net/cxgb4/l2t.h @@ -0,0 +1,110 @@ +/* + * This file is part of the Chelsio T4 Ethernet driver for Linux. + * + * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CXGB4_L2T_H +#define __CXGB4_L2T_H + +#include +#include +#include + +struct adapter; +struct l2t_data; +struct neighbour; +struct net_device; +struct file_operations; +struct cpl_l2t_write_rpl; + +/* + * Each L2T entry plays multiple roles. First of all, it keeps state for the + * corresponding entry of the HW L2 table and maintains a queue of offload + * packets awaiting address resolution. Second, it is a node of a hash table + * chain, where the nodes of the chain are linked together through their next + * pointer. Finally, each node is a bucket of a hash table, pointing to the + * first element in its chain through its first pointer. 
+ */
+struct l2t_entry {
+	u16 state; /* entry state */
+	u16 idx; /* entry index */
+	u32 addr[4]; /* next hop IP or IPv6 address */
+	int ifindex; /* neighbor's net_device's ifindex */
+	struct neighbour *neigh; /* associated neighbour */
+	struct l2t_entry *first; /* start of hash chain */
+	struct l2t_entry *next; /* next l2t_entry on chain */
+	struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
+	struct sk_buff *arpq_tail;
+	spinlock_t lock;
+	atomic_t refcnt; /* entry reference count */
+	u16 hash; /* hash bucket the entry is on */
+	u16 vlan; /* VLAN TCI (id: bits 0-11, prio: 13-15 */
+	u8 v6; /* whether entry is for IPv6 */
+	u8 lport; /* associated offload logical interface */
+	u8 dmac[ETH_ALEN]; /* neighbour's MAC address */
+};
+
+typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb);
+
+/*
+ * Callback stored in an skb to handle address resolution failure.
+ */
+struct l2t_skb_cb {
+	void *handle;
+	arp_err_handler_t arp_err_handler;
+};
+
+#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
+
+static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle,
+					  arp_err_handler_t handler)
+{
+	L2T_SKB_CB(skb)->handle = handle;
+	L2T_SKB_CB(skb)->arp_err_handler = handler;
+}
+
+void cxgb4_l2t_release(struct l2t_entry *e);
+int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
+		   struct l2t_entry *e);
+struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
+				const struct net_device *physdev,
+				unsigned int priority);
+
+void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
+int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
+			 u8 port, u8 *eth_addr);
+struct l2t_data *t4_init_l2t(void);
+void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
+
+extern const struct file_operations t4_l2t_fops;
+#endif /* __CXGB4_L2T_H */
--
cgit v1.2.3


From b8ff05a9c3237f694a1c3bf8ceec3bf6c3c14b15 Mon Sep 17 00:00:00 2001
From: Dimitris Michailidis
Date: Thu, 1 Apr 2010 15:28:26 +0000
Subject: cxgb4: Add main driver file and driver Makefile

Signed-off-by: Dimitris Michailidis
Signed-off-by: David S. Miller
---
 drivers/net/cxgb4/Makefile | 7 +
 drivers/net/cxgb4/cxgb4_main.c | 3388 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3395 insertions(+)
 create mode 100644 drivers/net/cxgb4/Makefile
 create mode 100644 drivers/net/cxgb4/cxgb4_main.c

diff --git a/drivers/net/cxgb4/Makefile b/drivers/net/cxgb4/Makefile
new file mode 100644
index 000000000000..498667487f52
--- /dev/null
+++ b/drivers/net/cxgb4/Makefile
@@ -0,0 +1,7 @@
+#
+# Chelsio T4 driver
+#
+
+obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
+
+cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
new file mode 100644
index 000000000000..a7e30a23d322
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -0,0 +1,3388 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cxgb4.h" +#include "t4_regs.h" +#include "t4_msg.h" +#include "t4fw_api.h" +#include "l2t.h" + +#define DRV_VERSION "1.0.0-ko" +#define DRV_DESC "Chelsio T4 Network Driver" + +/* + * Max interrupt hold-off timer value in us. Queues fall back to this value + * under extreme memory pressure so it's largish to give the system time to + * recover. + */ +#define MAX_SGE_TIMERVAL 200U + +enum { + MEMWIN0_APERTURE = 65536, + MEMWIN0_BASE = 0x30000, + MEMWIN1_APERTURE = 32768, + MEMWIN1_BASE = 0x28000, + MEMWIN2_APERTURE = 2048, + MEMWIN2_BASE = 0x1b800, +}; + +enum { + MAX_TXQ_ENTRIES = 16384, + MAX_CTRL_TXQ_ENTRIES = 1024, + MAX_RSPQ_ENTRIES = 16384, + MAX_RX_BUFFERS = 16384, + MIN_TXQ_ENTRIES = 32, + MIN_CTRL_TXQ_ENTRIES = 32, + MIN_RSPQ_ENTRIES = 128, + MIN_FL_ENTRIES = 16 +}; + +#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ + NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) + +#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 } + +static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { + CH_DEVICE(0xa000), /* PE10K */ + { 0, } +}; + +#define FW_FNAME "cxgb4/t4fw.bin" + +MODULE_DESCRIPTION(DRV_DESC); +MODULE_AUTHOR("Chelsio Communications"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); +MODULE_FIRMWARE(FW_FNAME); + +static int dflt_msg_enable = DFLT_MSG_ENABLE; + +module_param(dflt_msg_enable, int, 0644); +MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap"); + +/* + * The driver uses the best interrupt scheme available on a platform in the + * order MSI-X, MSI, legacy INTx interrupts. 
This parameter determines which
+ * of these schemes the driver may consider as follows:
+ *
+ * msi = 2: choose from among all three options
+ * msi = 1: only consider MSI and INTx interrupts
+ * msi = 0: force INTx interrupts
+ */
+static int msi = 2;
+
+module_param(msi, int, 0644);
+MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
+
+/*
+ * Queue interrupt hold-off timer values. Queues default to the first of these
+ * upon creation.
+ */
+static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
+
+module_param_array(intr_holdoff, uint, NULL, 0644);
+MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
+		 "0..4 in microseconds");
+
+static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
+
+module_param_array(intr_cnt, uint, NULL, 0644);
+MODULE_PARM_DESC(intr_cnt,
+		 "thresholds 1..3 for queue interrupt packet counters");
+
+static int vf_acls;
+
+#ifdef CONFIG_PCI_IOV
+module_param(vf_acls, bool, 0644);
+MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
+
+static unsigned int num_vf[4];
+
+module_param_array(num_vf, uint, NULL, 0644);
+MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
+#endif
+
+static struct dentry *cxgb4_debugfs_root;
+
+static LIST_HEAD(adapter_list);
+static DEFINE_MUTEX(uld_mutex);
+static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
+static const char *uld_str[] = { "RDMA", "iSCSI" };
+
+static void link_report(struct net_device *dev)
+{
+	if (!netif_carrier_ok(dev))
+		netdev_info(dev, "link down\n");
+	else {
+		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
+
+		const char *s = "10Mbps";
+		const struct port_info *p = netdev_priv(dev);
+
+		switch (p->link_cfg.speed) {
+		case SPEED_10000:
+			s = "10Gbps";
+			break;
+		case SPEED_1000:
+			s = "1000Mbps";
+			break;
+		case SPEED_100:
+			s = "100Mbps";
+			break;
+		}
+
+		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
+			    fc[p->link_cfg.fc]);
+	}
+}
+
+void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
+{
+	struct net_device *dev = adapter->port[port_id];
+
+	/* Skip changes from disabled ports. */
+	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
+		if (link_stat)
+			netif_carrier_on(dev);
+		else
+			netif_carrier_off(dev);
+
+		link_report(dev);
+	}
+}
+
+void t4_os_portmod_changed(const struct adapter *adap, int port_id)
+{
+	static const char *mod_str[] = {
+		NULL, "LR", "SR", "ER", "passive DA", "active DA"
+	};
+
+	const struct net_device *dev = adap->port[port_id];
+	const struct port_info *pi = netdev_priv(dev);
+
+	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+		netdev_info(dev, "port module unplugged\n");
+	else
+		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
+}
+
+/*
+ * Configure the exact and hash address filters to handle a port's multicast
+ * and secondary unicast MAC addresses.
+ */ +static int set_addr_filters(const struct net_device *dev, bool sleep) +{ + u64 mhash = 0; + u64 uhash = 0; + bool free = true; + u16 filt_idx[7]; + const u8 *addr[7]; + int ret, naddr = 0; + const struct dev_addr_list *d; + const struct netdev_hw_addr *ha; + int uc_cnt = netdev_uc_count(dev); + const struct port_info *pi = netdev_priv(dev); + + /* first do the secondary unicast addresses */ + netdev_for_each_uc_addr(ha, dev) { + addr[naddr++] = ha->addr; + if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { + ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free, + naddr, addr, filt_idx, &uhash, sleep); + if (ret < 0) + return ret; + + free = false; + naddr = 0; + } + } + + /* next set up the multicast addresses */ + netdev_for_each_mc_addr(d, dev) { + addr[naddr++] = d->dmi_addr; + if (naddr >= ARRAY_SIZE(addr) || d->next == NULL) { + ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free, + naddr, addr, filt_idx, &mhash, sleep); + if (ret < 0) + return ret; + + free = false; + naddr = 0; + } + } + + return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0, + uhash | mhash, sleep); +} + +/* + * Set Rx properties of a port, such as promiscuity, address filters, and MTU. + * If @mtu is -1 it is left unchanged. + */ +static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + ret = set_addr_filters(dev, sleep_ok); + if (ret == 0) + ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu, + (dev->flags & IFF_PROMISC) ? 1 : 0, + (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, + sleep_ok); + return ret; +} + +/** + * link_start - enable a port + * @dev: the port to enable + * + * Performs the MAC and PHY actions needed to enable a port. + */ +static int link_start(struct net_device *dev) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + /* + * We do not set address filters and promiscuity here; the stack does + * that step explicitly. + */ + ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1, + true); + if (ret == 0) { + ret = t4_change_mac(pi->adapter, 0, pi->viid, + pi->xact_addr_filt, dev->dev_addr, true, + false); + if (ret >= 0) { + pi->xact_addr_filt = ret; + ret = 0; + } + } + if (ret == 0) + ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg); + if (ret == 0) + ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true); + return ret; +} + +/* + * Response queue handler for the FW event queue.
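+ * Egress-queue update notifications restart stalled Ethernet or offload Tx
+ * queues, FW6/FW4 messages (type 0) are handed to t4_handle_fw_rpl(), and
+ * L2T write replies complete pending L2 table updates.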
+ */ +static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl) +{ + u8 opcode = ((const struct rss_header *)rsp)->opcode; + + rsp++; /* skip RSS header */ + if (likely(opcode == CPL_SGE_EGR_UPDATE)) { + const struct cpl_sge_egr_update *p = (void *)rsp; + unsigned int qid = EGR_QID(ntohl(p->opcode_qid)); + struct sge_txq *txq = q->adap->sge.egr_map[qid]; + + txq->restarts++; + if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) { + struct sge_eth_txq *eq; + + eq = container_of(txq, struct sge_eth_txq, q); + netif_tx_wake_queue(eq->txq); + } else { + struct sge_ofld_txq *oq; + + oq = container_of(txq, struct sge_ofld_txq, q); + tasklet_schedule(&oq->qresume_tsk); + } + } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) { + const struct cpl_fw6_msg *p = (void *)rsp; + + if (p->type == 0) + t4_handle_fw_rpl(q->adap, p->data); + } else if (opcode == CPL_L2T_WRITE_RPL) { + const struct cpl_l2t_write_rpl *p = (void *)rsp; + + do_l2t_write_rpl(q->adap, p); + } else + dev_err(q->adap->pdev_dev, + "unexpected CPL %#x on FW event queue\n", opcode); + return 0; +} + +/** + * uldrx_handler - response queue handler for ULD queues + * @q: the response queue that received the packet + * @rsp: the response queue descriptor holding the offload message + * @gl: the gather list of packet fragments + * + * Deliver an ingress offload packet to a ULD. All processing is done by + * the ULD, we just maintain statistics. + */ +static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl) +{ + struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); + + if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) { + rxq->stats.nomem++; + return -1; + } + if (gl == NULL) + rxq->stats.imm++; + else if (gl == CXGB4_MSG_AN) + rxq->stats.an++; + else + rxq->stats.pkts++; + return 0; +} + +static void disable_msi(struct adapter *adapter) +{ + if (adapter->flags & USING_MSIX) { + pci_disable_msix(adapter->pdev); + adapter->flags &= ~USING_MSIX; + } else if (adapter->flags & USING_MSI) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~USING_MSI; + } +} + +/* + * Interrupt handler for non-data events used with MSI-X. + */ +static irqreturn_t t4_nondata_intr(int irq, void *cookie) +{ + struct adapter *adap = cookie; + + u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE)); + if (v & PFSW) { + adap->swintr = 1; + t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v); + } + t4_slow_intr_handler(adap); + return IRQ_HANDLED; +} + +/* + * Name the MSI-X interrupts. 
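+ * Vector 0 carries the non-data (slow path) interrupt, vector 1 the FW event
+ * queue, and the remaining vectors the per-port Ethernet Rx queues followed
+ * by the offload and RDMA Rx queues, matching request_msix_queue_irqs().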
+ */ +static void name_msix_vecs(struct adapter *adap) +{ + int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1; + + /* non-data interrupts */ + snprintf(adap->msix_info[0].desc, n, "%s", adap->name); + adap->msix_info[0].desc[n] = 0; + + /* FW events */ + snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name); + adap->msix_info[1].desc[n] = 0; + + /* Ethernet queues */ + for_each_port(adap, j) { + struct net_device *d = adap->port[j]; + const struct port_info *pi = netdev_priv(d); + + for (i = 0; i < pi->nqsets; i++, msi_idx++) { + snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", + d->name, i); + adap->msix_info[msi_idx].desc[n] = 0; + } + } + + /* offload queues */ + for_each_ofldrxq(&adap->sge, i) { + snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d", + adap->name, i); + adap->msix_info[msi_idx++].desc[n] = 0; + } + for_each_rdmarxq(&adap->sge, i) { + snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d", + adap->name, i); + adap->msix_info[msi_idx++].desc[n] = 0; + } +} + +static int request_msix_queue_irqs(struct adapter *adap) +{ + struct sge *s = &adap->sge; + int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2; + + err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, + adap->msix_info[1].desc, &s->fw_evtq); + if (err) + return err; + + for_each_ethrxq(s, ethqidx) { + err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, + adap->msix_info[msi].desc, + &s->ethrxq[ethqidx].rspq); + if (err) + goto unwind; + msi++; + } + for_each_ofldrxq(s, ofldqidx) { + err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, + adap->msix_info[msi].desc, + &s->ofldrxq[ofldqidx].rspq); + if (err) + goto unwind; + msi++; + } + for_each_rdmarxq(s, rdmaqidx) { + err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0, + adap->msix_info[msi].desc, + &s->rdmarxq[rdmaqidx].rspq); + if (err) + goto unwind; + msi++; + } + return 0; + +unwind: + while (--rdmaqidx >= 0) + free_irq(adap->msix_info[--msi].vec, + &s->rdmarxq[rdmaqidx].rspq); + while (--ofldqidx >= 0) + free_irq(adap->msix_info[--msi].vec, + &s->ofldrxq[ofldqidx].rspq); + while (--ethqidx >= 0) + free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq); + free_irq(adap->msix_info[1].vec, &s->fw_evtq); + return err; +} + +static void free_msix_queue_irqs(struct adapter *adap) +{ + int i, msi = 2; + struct sge *s = &adap->sge; + + free_irq(adap->msix_info[1].vec, &s->fw_evtq); + for_each_ethrxq(s, i) + free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq); + for_each_ofldrxq(s, i) + free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq); + for_each_rdmarxq(s, i) + free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq); +} + +/** + * setup_rss - configure RSS + * @adap: the adapter + * + * Sets up RSS to distribute packets to multiple receive queues. We + * configure the RSS CPU lookup table to distribute to the number of HW + * receive queues, and the response queue lookup table to narrow that + * down to the response queues actually configured for each port. + * We always configure the RSS mapping for all ports since the mapping + * table has plenty of entries. 
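+ *
+ * For example (hypothetical IDs), a port with four queue sets whose Rx
+ * response queues have absolute IDs 8..11 gets an RSS slice built from
+ * the list [8, 9, 10, 11].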
+ */ +static int setup_rss(struct adapter *adap) +{ + int i, j, err; + u16 rss[MAX_ETH_QSETS]; + + for_each_port(adap, i) { + const struct port_info *pi = adap2pinfo(adap, i); + const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; + + for (j = 0; j < pi->nqsets; j++) + rss[j] = q[j].rspq.abs_id; + + err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size, + rss, pi->nqsets); + if (err) + return err; + } + return 0; +} + +/* + * Wait until all NAPI handlers are descheduled. + */ +static void quiesce_rx(struct adapter *adap) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { + struct sge_rspq *q = adap->sge.ingr_map[i]; + + if (q && q->handler) + napi_disable(&q->napi); + } +} + +/* + * Enable NAPI scheduling and interrupt generation for all Rx queues. + */ +static void enable_rx(struct adapter *adap) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { + struct sge_rspq *q = adap->sge.ingr_map[i]; + + if (!q) + continue; + if (q->handler) + napi_enable(&q->napi); + /* 0-increment GTS to start the timer and enable interrupts */ + t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), + SEINTARM(q->intr_params) | + INGRESSQID(q->cntxt_id)); + } +} + +/** + * setup_sge_queues - configure SGE Tx/Rx/response queues + * @adap: the adapter + * + * Determines how many sets of SGE queues to use and initializes them. + * We support multiple queue sets per port if we have MSI-X, otherwise + * just one queue set per port. + */ +static int setup_sge_queues(struct adapter *adap) +{ + int err, msi_idx, i, j; + struct sge *s = &adap->sge; + + bitmap_zero(s->starving_fl, MAX_EGRQ); + bitmap_zero(s->txq_maperr, MAX_EGRQ); + + if (adap->flags & USING_MSIX) + msi_idx = 1; /* vector 0 is for non-queue interrupts */ + else { + err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, + NULL, NULL); + if (err) + return err; + msi_idx = -((int)s->intrq.abs_id + 1); + } + + err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], + msi_idx, NULL, fwevtq_handler); + if (err) { +freeout: t4_free_sge_resources(adap); + return err; + } + + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + struct port_info *pi = netdev_priv(dev); + struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset]; + struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; + + for (j = 0; j < pi->nqsets; j++, q++) { + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, + msi_idx, &q->fl, + t4_ethrx_handler); + if (err) + goto freeout; + q->rspq.idx = j; + memset(&q->stats, 0, sizeof(q->stats)); + } + for (j = 0; j < pi->nqsets; j++, t++) { + err = t4_sge_alloc_eth_txq(adap, t, dev, + netdev_get_tx_queue(dev, j), + s->fw_evtq.cntxt_id); + if (err) + goto freeout; + } + } + + j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */ + for_each_ofldrxq(s, i) { + struct sge_ofld_rxq *q = &s->ofldrxq[i]; + struct net_device *dev = adap->port[i / j]; + + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx, + &q->fl, uldrx_handler); + if (err) + goto freeout; + memset(&q->stats, 0, sizeof(q->stats)); + s->ofld_rxq[i] = q->rspq.abs_id; + err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev, + s->fw_evtq.cntxt_id); + if (err) + goto freeout; + } + + for_each_rdmarxq(s, i) { + struct sge_ofld_rxq *q = &s->rdmarxq[i]; + + if (msi_idx > 0) + msi_idx++; + err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], + msi_idx, &q->fl, uldrx_handler); + if (err) + goto freeout; + memset(&q->stats, 0, sizeof(q->stats)); 
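+ /* record the absolute queue ID so it can be passed to the RDMA ULD */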
+ s->rdma_rxq[i] = q->rspq.abs_id; + } + + for_each_port(adap, i) { + /* + * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't + * have RDMA queues, and that's the right value. + */ + err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], + s->fw_evtq.cntxt_id, + s->rdmarxq[i].rspq.cntxt_id); + if (err) + goto freeout; + } + + t4_write_reg(adap, MPS_TRC_RSS_CONTROL, + RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | + QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); + return 0; +} + +/* + * Returns 0 if new FW was successfully loaded, a positive errno if a load was + * started but failed, and a negative errno if flash load couldn't start. + */ +static int upgrade_fw(struct adapter *adap) +{ + int ret; + u32 vers; + const struct fw_hdr *hdr; + const struct firmware *fw; + struct device *dev = adap->pdev_dev; + + ret = request_firmware(&fw, FW_FNAME, dev); + if (ret < 0) { + dev_err(dev, "unable to load firmware image " FW_FNAME + ", error %d\n", ret); + return ret; + } + + hdr = (const struct fw_hdr *)fw->data; + vers = ntohl(hdr->fw_ver); + if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) { + ret = -EINVAL; /* wrong major version, won't do */ + goto out; + } + + /* + * If the flash FW is unusable or we found something newer, load it. + */ + if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR || + vers > adap->params.fw_vers) { + ret = -t4_load_fw(adap, fw->data, fw->size); + if (!ret) + dev_info(dev, "firmware upgraded to version %pI4 from " + FW_FNAME "\n", &hdr->fw_ver); + } +out: release_firmware(fw); + return ret; +} + +/* + * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. + * The allocated memory is cleared. + */ +void *t4_alloc_mem(size_t size) +{ + void *p = kmalloc(size, GFP_KERNEL); + + if (!p) + p = vmalloc(size); + if (p) + memset(p, 0, size); + return p; +} + +/* + * Free memory allocated through alloc_mem(). + */ +void t4_free_mem(void *addr) +{ + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} + +static inline int is_offload(const struct adapter *adap) +{ + return adap->params.offload; +} + +/* + * Implementation of ethtool operations. 
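+ * These back the standard ethtool entry points; e.g. "ethtool -S" walks
+ * stats_strings[] via get_strings()/get_stats(), and "ethtool -i" reports
+ * what get_drvinfo() fills in.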
+ */ + +static u32 get_msglevel(struct net_device *dev) +{ + return netdev2adap(dev)->msg_enable; +} + +static void set_msglevel(struct net_device *dev, u32 val) +{ + netdev2adap(dev)->msg_enable = val; +} + +static char stats_strings[][ETH_GSTRING_LEN] = { + "TxOctetsOK ", + "TxFramesOK ", + "TxBroadcastFrames ", + "TxMulticastFrames ", + "TxUnicastFrames ", + "TxErrorFrames ", + + "TxFrames64 ", + "TxFrames65To127 ", + "TxFrames128To255 ", + "TxFrames256To511 ", + "TxFrames512To1023 ", + "TxFrames1024To1518 ", + "TxFrames1519ToMax ", + + "TxFramesDropped ", + "TxPauseFrames ", + "TxPPP0Frames ", + "TxPPP1Frames ", + "TxPPP2Frames ", + "TxPPP3Frames ", + "TxPPP4Frames ", + "TxPPP5Frames ", + "TxPPP6Frames ", + "TxPPP7Frames ", + + "RxOctetsOK ", + "RxFramesOK ", + "RxBroadcastFrames ", + "RxMulticastFrames ", + "RxUnicastFrames ", + + "RxFramesTooLong ", + "RxJabberErrors ", + "RxFCSErrors ", + "RxLengthErrors ", + "RxSymbolErrors ", + "RxRuntFrames ", + + "RxFrames64 ", + "RxFrames65To127 ", + "RxFrames128To255 ", + "RxFrames256To511 ", + "RxFrames512To1023 ", + "RxFrames1024To1518 ", + "RxFrames1519ToMax ", + + "RxPauseFrames ", + "RxPPP0Frames ", + "RxPPP1Frames ", + "RxPPP2Frames ", + "RxPPP3Frames ", + "RxPPP4Frames ", + "RxPPP5Frames ", + "RxPPP6Frames ", + "RxPPP7Frames ", + + "RxBG0FramesDropped ", + "RxBG1FramesDropped ", + "RxBG2FramesDropped ", + "RxBG3FramesDropped ", + "RxBG0FramesTrunc ", + "RxBG1FramesTrunc ", + "RxBG2FramesTrunc ", + "RxBG3FramesTrunc ", + + "TSO ", + "TxCsumOffload ", + "RxCsumGood ", + "VLANextractions ", + "VLANinsertions ", +}; + +static int get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_strings); + default: + return -EOPNOTSUPP; + } +} + +#define T4_REGMAP_SIZE (160 * 1024) + +static int get_regs_len(struct net_device *dev) +{ + return T4_REGMAP_SIZE; +} + +static int get_eeprom_len(struct net_device *dev) +{ + return EEPROMSIZE; +} + +static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct adapter *adapter = netdev2adap(dev); + + strcpy(info->driver, KBUILD_MODNAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, pci_name(adapter->pdev)); + + if (!adapter->params.fw_vers) + strcpy(info->fw_version, "N/A"); + else + snprintf(info->fw_version, sizeof(info->fw_version), + "%u.%u.%u.%u, TP %u.%u.%u.%u", + FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers), + FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers), + FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers), + FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers), + FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers)); +} + +static void get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + if (stringset == ETH_SS_STATS) + memcpy(data, stats_strings, sizeof(stats_strings)); +} + +/* + * port stats maintained per queue of the port. They should be in the same + * order as in stats_strings above. 
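+ * (i.e. the trailing five entries: TSO, TxCsumOffload, RxCsumGood,
+ * VLANextractions and VLANinsertions).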
+ */ +struct queue_port_stats { + u64 tso; + u64 tx_csum; + u64 rx_csum; + u64 vlan_ex; + u64 vlan_ins; +}; + +static void collect_sge_port_stats(const struct adapter *adap, + const struct port_info *p, struct queue_port_stats *s) +{ + int i; + const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset]; + const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset]; + + memset(s, 0, sizeof(*s)); + for (i = 0; i < p->nqsets; i++, rx++, tx++) { + s->tso += tx->tso; + s->tx_csum += tx->tx_cso; + s->rx_csum += rx->stats.rx_cso; + s->vlan_ex += rx->stats.vlan_ex; + s->vlan_ins += tx->vlan_ins; + } +} + +static void get_stats(struct net_device *dev, struct ethtool_stats *stats, + u64 *data) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); + + data += sizeof(struct port_stats) / sizeof(u64); + collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); +} + +/* + * Return a version number to identify the type of adapter. The scheme is: + * - bits 0..9: chip version + * - bits 10..15: chip revision + */ +static inline unsigned int mk_adap_vers(const struct adapter *ap) +{ + return 4 | (ap->params.rev << 10); +} + +static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, + unsigned int end) +{ + u32 *p = buf + start; + + for ( ; start <= end; start += sizeof(u32)) + *p++ = t4_read_reg(ap, start); +} + +static void get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *buf) +{ + static const unsigned int reg_ranges[] = { + 0x1008, 0x1108, + 0x1180, 0x11b4, + 0x11fc, 0x123c, + 0x1300, 0x173c, + 0x1800, 0x18fc, + 0x3000, 0x30d8, + 0x30e0, 0x5924, + 0x5960, 0x59d4, + 0x5a00, 0x5af8, + 0x6000, 0x6098, + 0x6100, 0x6150, + 0x6200, 0x6208, + 0x6240, 0x6248, + 0x6280, 0x6338, + 0x6370, 0x638c, + 0x6400, 0x643c, + 0x6500, 0x6524, + 0x6a00, 0x6a38, + 0x6a60, 0x6a78, + 0x6b00, 0x6b84, + 0x6bf0, 0x6c84, + 0x6cf0, 0x6d84, + 0x6df0, 0x6e84, + 0x6ef0, 0x6f84, + 0x6ff0, 0x7084, + 0x70f0, 0x7184, + 0x71f0, 0x7284, + 0x72f0, 0x7384, + 0x73f0, 0x7450, + 0x7500, 0x7530, + 0x7600, 0x761c, + 0x7680, 0x76cc, + 0x7700, 0x7798, + 0x77c0, 0x77fc, + 0x7900, 0x79fc, + 0x7b00, 0x7c38, + 0x7d00, 0x7efc, + 0x8dc0, 0x8e1c, + 0x8e30, 0x8e78, + 0x8ea0, 0x8f6c, + 0x8fc0, 0x9074, + 0x90fc, 0x90fc, + 0x9400, 0x9458, + 0x9600, 0x96bc, + 0x9800, 0x9808, + 0x9820, 0x983c, + 0x9850, 0x9864, + 0x9c00, 0x9c6c, + 0x9c80, 0x9cec, + 0x9d00, 0x9d6c, + 0x9d80, 0x9dec, + 0x9e00, 0x9e6c, + 0x9e80, 0x9eec, + 0x9f00, 0x9f6c, + 0x9f80, 0x9fec, + 0xd004, 0xd03c, + 0xdfc0, 0xdfe0, + 0xe000, 0xea7c, + 0xf000, 0x11190, + 0x19040, 0x19124, + 0x19150, 0x191b0, + 0x191d0, 0x191e8, + 0x19238, 0x1924c, + 0x193f8, 0x19474, + 0x19490, 0x194f8, + 0x19800, 0x19f30, + 0x1a000, 0x1a06c, + 0x1a0b0, 0x1a120, + 0x1a128, 0x1a138, + 0x1a190, 0x1a1c4, + 0x1a1fc, 0x1a1fc, + 0x1e040, 0x1e04c, + 0x1e240, 0x1e28c, + 0x1e2c0, 0x1e2c0, + 0x1e2e0, 0x1e2e0, + 0x1e300, 0x1e384, + 0x1e3c0, 0x1e3c8, + 0x1e440, 0x1e44c, + 0x1e640, 0x1e68c, + 0x1e6c0, 0x1e6c0, + 0x1e6e0, 0x1e6e0, + 0x1e700, 0x1e784, + 0x1e7c0, 0x1e7c8, + 0x1e840, 0x1e84c, + 0x1ea40, 0x1ea8c, + 0x1eac0, 0x1eac0, + 0x1eae0, 0x1eae0, + 0x1eb00, 0x1eb84, + 0x1ebc0, 0x1ebc8, + 0x1ec40, 0x1ec4c, + 0x1ee40, 0x1ee8c, + 0x1eec0, 0x1eec0, + 0x1eee0, 0x1eee0, + 0x1ef00, 0x1ef84, + 0x1efc0, 0x1efc8, + 0x1f040, 0x1f04c, + 0x1f240, 0x1f28c, + 0x1f2c0, 0x1f2c0, + 0x1f2e0, 0x1f2e0, + 0x1f300, 0x1f384, + 0x1f3c0, 0x1f3c8, + 0x1f440, 0x1f44c, + 0x1f640, 0x1f68c, + 0x1f6c0, 
0x1f6c0, + 0x1f6e0, 0x1f6e0, + 0x1f700, 0x1f784, + 0x1f7c0, 0x1f7c8, + 0x1f840, 0x1f84c, + 0x1fa40, 0x1fa8c, + 0x1fac0, 0x1fac0, + 0x1fae0, 0x1fae0, + 0x1fb00, 0x1fb84, + 0x1fbc0, 0x1fbc8, + 0x1fc40, 0x1fc4c, + 0x1fe40, 0x1fe8c, + 0x1fec0, 0x1fec0, + 0x1fee0, 0x1fee0, + 0x1ff00, 0x1ff84, + 0x1ffc0, 0x1ffc8, + 0x20000, 0x2002c, + 0x20100, 0x2013c, + 0x20190, 0x201c8, + 0x20200, 0x20318, + 0x20400, 0x20528, + 0x20540, 0x20614, + 0x21000, 0x21040, + 0x2104c, 0x21060, + 0x210c0, 0x210ec, + 0x21200, 0x21268, + 0x21270, 0x21284, + 0x212fc, 0x21388, + 0x21400, 0x21404, + 0x21500, 0x21518, + 0x2152c, 0x2153c, + 0x21550, 0x21554, + 0x21600, 0x21600, + 0x21608, 0x21628, + 0x21630, 0x2163c, + 0x21700, 0x2171c, + 0x21780, 0x2178c, + 0x21800, 0x21c38, + 0x21c80, 0x21d7c, + 0x21e00, 0x21e04, + 0x22000, 0x2202c, + 0x22100, 0x2213c, + 0x22190, 0x221c8, + 0x22200, 0x22318, + 0x22400, 0x22528, + 0x22540, 0x22614, + 0x23000, 0x23040, + 0x2304c, 0x23060, + 0x230c0, 0x230ec, + 0x23200, 0x23268, + 0x23270, 0x23284, + 0x232fc, 0x23388, + 0x23400, 0x23404, + 0x23500, 0x23518, + 0x2352c, 0x2353c, + 0x23550, 0x23554, + 0x23600, 0x23600, + 0x23608, 0x23628, + 0x23630, 0x2363c, + 0x23700, 0x2371c, + 0x23780, 0x2378c, + 0x23800, 0x23c38, + 0x23c80, 0x23d7c, + 0x23e00, 0x23e04, + 0x24000, 0x2402c, + 0x24100, 0x2413c, + 0x24190, 0x241c8, + 0x24200, 0x24318, + 0x24400, 0x24528, + 0x24540, 0x24614, + 0x25000, 0x25040, + 0x2504c, 0x25060, + 0x250c0, 0x250ec, + 0x25200, 0x25268, + 0x25270, 0x25284, + 0x252fc, 0x25388, + 0x25400, 0x25404, + 0x25500, 0x25518, + 0x2552c, 0x2553c, + 0x25550, 0x25554, + 0x25600, 0x25600, + 0x25608, 0x25628, + 0x25630, 0x2563c, + 0x25700, 0x2571c, + 0x25780, 0x2578c, + 0x25800, 0x25c38, + 0x25c80, 0x25d7c, + 0x25e00, 0x25e04, + 0x26000, 0x2602c, + 0x26100, 0x2613c, + 0x26190, 0x261c8, + 0x26200, 0x26318, + 0x26400, 0x26528, + 0x26540, 0x26614, + 0x27000, 0x27040, + 0x2704c, 0x27060, + 0x270c0, 0x270ec, + 0x27200, 0x27268, + 0x27270, 0x27284, + 0x272fc, 0x27388, + 0x27400, 0x27404, + 0x27500, 0x27518, + 0x2752c, 0x2753c, + 0x27550, 0x27554, + 0x27600, 0x27600, + 0x27608, 0x27628, + 0x27630, 0x2763c, + 0x27700, 0x2771c, + 0x27780, 0x2778c, + 0x27800, 0x27c38, + 0x27c80, 0x27d7c, + 0x27e00, 0x27e04 + }; + + int i; + struct adapter *ap = netdev2adap(dev); + + regs->version = mk_adap_vers(ap); + + memset(buf, 0, T4_REGMAP_SIZE); + for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2) + reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]); +} + +static int restart_autoneg(struct net_device *dev) +{ + struct port_info *p = netdev_priv(dev); + + if (!netif_running(dev)) + return -EAGAIN; + if (p->link_cfg.autoneg != AUTONEG_ENABLE) + return -EINVAL; + t4_restart_aneg(p->adapter, 0, p->tx_chan); + return 0; +} + +static int identify_port(struct net_device *dev, u32 data) +{ + if (data == 0) + data = 2; /* default to 2 seconds */ + + return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid, + data * 5); +} + +static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps) +{ + unsigned int v = 0; + + if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) { + v |= SUPPORTED_TP; + if (caps & FW_PORT_CAP_SPEED_100M) + v |= SUPPORTED_100baseT_Full; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseT_Full; + if (caps & FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseT_Full; + } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { + v |= SUPPORTED_Backplane; + if (caps & FW_PORT_CAP_SPEED_1G) + v |= SUPPORTED_1000baseKX_Full; + if (caps & 
FW_PORT_CAP_SPEED_10G) + v |= SUPPORTED_10000baseKX4_Full; + } else if (type == FW_PORT_TYPE_KR) + v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; + else if (type == FW_PORT_TYPE_FIBER) + v |= SUPPORTED_FIBRE; + + if (caps & FW_PORT_CAP_ANEG) + v |= SUPPORTED_Autoneg; + return v; +} + +static unsigned int to_fw_linkcaps(unsigned int caps) +{ + unsigned int v = 0; + + if (caps & ADVERTISED_100baseT_Full) + v |= FW_PORT_CAP_SPEED_100M; + if (caps & ADVERTISED_1000baseT_Full) + v |= FW_PORT_CAP_SPEED_1G; + if (caps & ADVERTISED_10000baseT_Full) + v |= FW_PORT_CAP_SPEED_10G; + return v; +} + +static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + const struct port_info *p = netdev_priv(dev); + + if (p->port_type == FW_PORT_TYPE_BT_SGMII || + p->port_type == FW_PORT_TYPE_BT_XAUI) + cmd->port = PORT_TP; + else if (p->port_type == FW_PORT_TYPE_FIBER) + cmd->port = PORT_FIBRE; + else if (p->port_type == FW_PORT_TYPE_TWINAX) + cmd->port = PORT_DA; + else + cmd->port = PORT_OTHER; + + if (p->mdio_addr >= 0) { + cmd->phy_address = p->mdio_addr; + cmd->transceiver = XCVR_EXTERNAL; + cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? + MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; + } else { + cmd->phy_address = 0; /* not really, but no better option */ + cmd->transceiver = XCVR_INTERNAL; + cmd->mdio_support = 0; + } + + cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported); + cmd->advertising = from_fw_linkcaps(p->port_type, + p->link_cfg.advertising); + cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0; + cmd->duplex = DUPLEX_FULL; + cmd->autoneg = p->link_cfg.autoneg; + cmd->maxtxpkt = 0; + cmd->maxrxpkt = 0; + return 0; +} + +static unsigned int speed_to_caps(int speed) +{ + if (speed == SPEED_100) + return FW_PORT_CAP_SPEED_100M; + if (speed == SPEED_1000) + return FW_PORT_CAP_SPEED_1G; + if (speed == SPEED_10000) + return FW_PORT_CAP_SPEED_10G; + return 0; +} + +static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + unsigned int cap; + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_cfg; + + if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */ + return -EINVAL; + + if (!(lc->supported & FW_PORT_CAP_ANEG)) { + /* + * PHY offers a single speed. See if that's what's + * being requested. 
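+		 * Requests that try to enable autoneg, or forced speeds that do
+		 * not match the PHY's single speed, are rejected with -EINVAL.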
+ */ + if (cmd->autoneg == AUTONEG_DISABLE && + (lc->supported & speed_to_caps(cmd->speed))) + return 0; + return -EINVAL; + } + + if (cmd->autoneg == AUTONEG_DISABLE) { + cap = speed_to_caps(cmd->speed); + + if (!(lc->supported & cap) || cmd->speed == SPEED_1000 || + cmd->speed == SPEED_10000) + return -EINVAL; + lc->requested_speed = cap; + lc->advertising = 0; + } else { + cap = to_fw_linkcaps(cmd->advertising); + if (!(lc->supported & cap)) + return -EINVAL; + lc->requested_speed = 0; + lc->advertising = cap | FW_PORT_CAP_ANEG; + } + lc->autoneg = cmd->autoneg; + + if (netif_running(dev)) + return t4_link_start(p->adapter, 0, p->tx_chan, lc); + return 0; +} + +static void get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + + epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; + epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; + epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; +} + +static int set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct port_info *p = netdev_priv(dev); + struct link_config *lc = &p->link_cfg; + + if (epause->autoneg == AUTONEG_DISABLE) + lc->requested_fc = 0; + else if (lc->supported & FW_PORT_CAP_ANEG) + lc->requested_fc = PAUSE_AUTONEG; + else + return -EINVAL; + + if (epause->rx_pause) + lc->requested_fc |= PAUSE_RX; + if (epause->tx_pause) + lc->requested_fc |= PAUSE_TX; + if (netif_running(dev)) + return t4_link_start(p->adapter, 0, p->tx_chan, lc); + return 0; +} + +static u32 get_rx_csum(struct net_device *dev) +{ + struct port_info *p = netdev_priv(dev); + + return p->rx_offload & RX_CSO; +} + +static int set_rx_csum(struct net_device *dev, u32 data) +{ + struct port_info *p = netdev_priv(dev); + + if (data) + p->rx_offload |= RX_CSO; + else + p->rx_offload &= ~RX_CSO; + return 0; +} + +static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + const struct port_info *pi = netdev_priv(dev); + const struct sge *s = &pi->adapter->sge; + + e->rx_max_pending = MAX_RX_BUFFERS; + e->rx_mini_max_pending = MAX_RSPQ_ENTRIES; + e->rx_jumbo_max_pending = 0; + e->tx_max_pending = MAX_TXQ_ENTRIES; + + e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8; + e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; + e->rx_jumbo_pending = 0; + e->tx_pending = s->ethtxq[pi->first_qset].q.size; +} + +static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) +{ + int i; + const struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + struct sge *s = &adapter->sge; + + if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending || + e->tx_pending > MAX_TXQ_ENTRIES || + e->rx_mini_pending > MAX_RSPQ_ENTRIES || + e->rx_mini_pending < MIN_RSPQ_ENTRIES || + e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES) + return -EINVAL; + + if (adapter->flags & FULL_INIT_DONE) + return -EBUSY; + + for (i = 0; i < pi->nqsets; ++i) { + s->ethtxq[pi->first_qset + i].q.size = e->tx_pending; + s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8; + s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending; + } + return 0; +} + +static int closest_timer(const struct sge *s, int time) +{ + int i, delta, match = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { + delta = time - s->timer_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +static int closest_thres(const 
struct sge *s, int thres) +{ + int i, delta, match = 0, min_delta = INT_MAX; + + for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { + delta = thres - s->counter_val[i]; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + match = i; + } + } + return match; +} + +/* + * Return a queue's interrupt hold-off time in us. 0 means no timer. + */ +static unsigned int qtimer_val(const struct adapter *adap, + const struct sge_rspq *q) +{ + unsigned int idx = q->intr_params >> 1; + + return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0; +} + +/** + * set_rxq_intr_params - set a queue's interrupt holdoff parameters + * @adap: the adapter + * @q: the Rx queue + * @us: the hold-off time in us, or 0 to disable timer + * @cnt: the hold-off packet count, or 0 to disable counter + * + * Sets an Rx queue's interrupt hold-off time and packet count. At least + * one of the two needs to be enabled for the queue to generate interrupts. + */ +static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q, + unsigned int us, unsigned int cnt) +{ + if ((us | cnt) == 0) + cnt = 1; + + if (cnt) { + int err; + u32 v, new_idx; + + new_idx = closest_thres(&adap->sge, cnt); + if (q->desc && q->pktcnt_idx != new_idx) { + /* the queue has already been created, update it */ + v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | + FW_PARAMS_PARAM_YZ(q->cntxt_id); + err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx); + if (err) + return err; + } + q->pktcnt_idx = new_idx; + } + + us = us == 0 ? 6 : closest_timer(&adap->sge, us); + q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0); + return 0; +} + +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + const struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + + return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq, + c->rx_coalesce_usecs, c->rx_max_coalesced_frames); +} + +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +{ + const struct port_info *pi = netdev_priv(dev); + const struct adapter *adap = pi->adapter; + const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq; + + c->rx_coalesce_usecs = qtimer_val(adap, rq); + c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ? + adap->sge.counter_val[rq->pktcnt_idx] : 0; + return 0; +} + +/* + * Translate a physical EEPROM address to virtual. The first 1K is accessed + * through virtual addresses starting at 31K, the rest is accessed through + * virtual addresses starting at 0. This mapping is correct only for PF0. + */ +static int eeprom_ptov(unsigned int phys_addr) +{ + if (phys_addr < 1024) + return phys_addr + (31 << 10); + if (phys_addr < EEPROMSIZE) + return phys_addr - 1024; + return -EINVAL; +} + +/* + * The next two routines implement eeprom read/write from physical addresses. + * The physical->virtual translation is correct only for PF0. + */ +static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) +{ + int vaddr = eeprom_ptov(phys_addr); + + if (vaddr >= 0) + vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); + return vaddr < 0 ? vaddr : 0; +} + +static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) +{ + int vaddr = eeprom_ptov(phys_addr); + + if (vaddr >= 0) + vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); + return vaddr < 0 ? 
vaddr : 0; +} + +#define EEPROM_MAGIC 0x38E2F10C + +static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, + u8 *data) +{ + int i, err = 0; + struct adapter *adapter = netdev2adap(dev); + + u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + e->magic = EEPROM_MAGIC; + for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) + err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]); + + if (!err) + memcpy(data, buf + e->offset, e->len); + kfree(buf); + return err; +} + +static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + u8 *buf; + int err = 0; + u32 aligned_offset, aligned_len, *p; + struct adapter *adapter = netdev2adap(dev); + + if (eeprom->magic != EEPROM_MAGIC) + return -EINVAL; + + aligned_offset = eeprom->offset & ~3; + aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; + + if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { + /* + * RMW possibly needed for first or last words. + */ + buf = kmalloc(aligned_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); + if (!err && aligned_len > 4) + err = eeprom_rd_phys(adapter, + aligned_offset + aligned_len - 4, + (u32 *)&buf[aligned_len - 4]); + if (err) + goto out; + memcpy(buf + (eeprom->offset & 3), data, eeprom->len); + } else + buf = data; + + err = t4_seeprom_wp(adapter, false); + if (err) + goto out; + + for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { + err = eeprom_wr_phys(adapter, aligned_offset, *p); + aligned_offset += 4; + } + + if (!err) + err = t4_seeprom_wp(adapter, true); +out: + if (buf != data) + kfree(buf); + return err; +} + +static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + int ret; + const struct firmware *fw; + struct adapter *adap = netdev2adap(netdev); + + ef->data[sizeof(ef->data) - 1] = '\0'; + ret = request_firmware(&fw, ef->data, adap->pdev_dev); + if (ret < 0) + return ret; + + ret = t4_load_fw(adap, fw->data, fw->size); + release_firmware(fw); + if (!ret) + dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data); + return ret; +} + +#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC) +#define BCAST_CRC 0xa0ccc1a6 + +static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + wol->supported = WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = netdev2adap(dev)->wol; + memset(&wol->sopass, 0, sizeof(wol->sopass)); +} + +static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + int err = 0; + struct port_info *pi = netdev_priv(dev); + + if (wol->wolopts & ~WOL_SUPPORTED) + return -EINVAL; + t4_wol_magic_enable(pi->adapter, pi->tx_chan, + (wol->wolopts & WAKE_MAGIC) ? 
dev->dev_addr : NULL); + if (wol->wolopts & WAKE_BCAST) { + err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL, + ~0ULL, 0, false); + if (!err) + err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1, + ~6ULL, ~0ULL, BCAST_CRC, true); + } else + t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false); + return err; +} + +static int set_tso(struct net_device *dev, u32 value) +{ + if (value) + dev->features |= NETIF_F_TSO | NETIF_F_TSO6; + else + dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + return 0; +} + +static struct ethtool_ops cxgb_ethtool_ops = { + .get_settings = get_settings, + .set_settings = set_settings, + .get_drvinfo = get_drvinfo, + .get_msglevel = get_msglevel, + .set_msglevel = set_msglevel, + .get_ringparam = get_sge_param, + .set_ringparam = set_sge_param, + .get_coalesce = get_coalesce, + .set_coalesce = set_coalesce, + .get_eeprom_len = get_eeprom_len, + .get_eeprom = get_eeprom, + .set_eeprom = set_eeprom, + .get_pauseparam = get_pauseparam, + .set_pauseparam = set_pauseparam, + .get_rx_csum = get_rx_csum, + .set_rx_csum = set_rx_csum, + .set_tx_csum = ethtool_op_set_tx_ipv6_csum, + .set_sg = ethtool_op_set_sg, + .get_link = ethtool_op_get_link, + .get_strings = get_strings, + .phys_id = identify_port, + .nway_reset = restart_autoneg, + .get_sset_count = get_sset_count, + .get_ethtool_stats = get_stats, + .get_regs_len = get_regs_len, + .get_regs = get_regs, + .get_wol = get_wol, + .set_wol = set_wol, + .set_tso = set_tso, + .flash_device = set_flash, +}; + +/* + * debugfs support + */ + +static int mem_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t mem_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) +{ + loff_t pos = *ppos; + loff_t avail = file->f_path.dentry->d_inode->i_size; + unsigned int mem = (uintptr_t)file->private_data & 3; + struct adapter *adap = file->private_data - mem; + + if (pos < 0) + return -EINVAL; + if (pos >= avail) + return 0; + if (count > avail - pos) + count = avail - pos; + + while (count) { + size_t len; + int ret, ofst; + __be32 data[16]; + + if (mem == MEM_MC) + ret = t4_mc_read(adap, pos, data, NULL); + else + ret = t4_edc_read(adap, mem, pos, data, NULL); + if (ret) + return ret; + + ofst = pos % sizeof(data); + len = min(count, sizeof(data) - ofst); + if (copy_to_user(buf, (u8 *)data + ofst, len)) + return -EFAULT; + + buf += len; + pos += len; + count -= len; + } + count = pos - *ppos; + *ppos = pos; + return count; +} + +static const struct file_operations mem_debugfs_fops = { + .owner = THIS_MODULE, + .open = mem_open, + .read = mem_read, +}; + +static void __devinit add_debugfs_mem(struct adapter *adap, const char *name, + unsigned int idx, unsigned int size_mb) +{ + struct dentry *de; + + de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, + (void *)adap + idx, &mem_debugfs_fops); + if (de && de->d_inode) + de->d_inode->i_size = size_mb << 20; +} + +static int __devinit setup_debugfs(struct adapter *adap) +{ + int i; + + if (IS_ERR_OR_NULL(adap->debugfs_root)) + return -1; + + i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); + if (i & EDRAM0_ENABLE) + add_debugfs_mem(adap, "edc0", MEM_EDC0, 5); + if (i & EDRAM1_ENABLE) + add_debugfs_mem(adap, "edc1", MEM_EDC1, 5); + if (i & EXT_MEM_ENABLE) + add_debugfs_mem(adap, "mc", MEM_MC, + EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR))); + if (adap->l2t) + debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, + &t4_l2t_fops); + return 0; +} + +/* + * 
upper-layer driver support + */ + +/* + * Allocate an active-open TID and set it to the supplied value. + */ +int cxgb4_alloc_atid(struct tid_info *t, void *data) +{ + int atid = -1; + + spin_lock_bh(&t->atid_lock); + if (t->afree) { + union aopen_entry *p = t->afree; + + atid = p - t->atid_tab; + t->afree = p->next; + p->data = data; + t->atids_in_use++; + } + spin_unlock_bh(&t->atid_lock); + return atid; +} +EXPORT_SYMBOL(cxgb4_alloc_atid); + +/* + * Release an active-open TID. + */ +void cxgb4_free_atid(struct tid_info *t, unsigned int atid) +{ + union aopen_entry *p = &t->atid_tab[atid]; + + spin_lock_bh(&t->atid_lock); + p->next = t->afree; + t->afree = p; + t->atids_in_use--; + spin_unlock_bh(&t->atid_lock); +} +EXPORT_SYMBOL(cxgb4_free_atid); + +/* + * Allocate a server TID and set it to the supplied value. + */ +int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) +{ + int stid; + + spin_lock_bh(&t->stid_lock); + if (family == PF_INET) { + stid = find_first_zero_bit(t->stid_bmap, t->nstids); + if (stid < t->nstids) + __set_bit(stid, t->stid_bmap); + else + stid = -1; + } else { + stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2); + if (stid < 0) + stid = -1; + } + if (stid >= 0) { + t->stid_tab[stid].data = data; + stid += t->stid_base; + t->stids_in_use++; + } + spin_unlock_bh(&t->stid_lock); + return stid; +} +EXPORT_SYMBOL(cxgb4_alloc_stid); + +/* + * Release a server TID. + */ +void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) +{ + stid -= t->stid_base; + spin_lock_bh(&t->stid_lock); + if (family == PF_INET) + __clear_bit(stid, t->stid_bmap); + else + bitmap_release_region(t->stid_bmap, stid, 2); + t->stid_tab[stid].data = NULL; + t->stids_in_use--; + spin_unlock_bh(&t->stid_lock); +} +EXPORT_SYMBOL(cxgb4_free_stid); + +/* + * Populate a TID_RELEASE WR. Caller must properly size the skb. + */ +static void mk_tid_release(struct sk_buff *skb, unsigned int chan, + unsigned int tid) +{ + struct cpl_tid_release *req; + + set_wr_txq(skb, CPL_PRIORITY_SETUP, chan); + req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, tid); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); +} + +/* + * Queue a TID release request and if necessary schedule a work queue to + * process it. + */ +void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, + unsigned int tid) +{ + void **p = &t->tid_tab[tid]; + struct adapter *adap = container_of(t, struct adapter, tids); + + spin_lock_bh(&adap->tid_release_lock); + *p = adap->tid_release_head; + /* Low 2 bits encode the Tx channel number */ + adap->tid_release_head = (void **)((uintptr_t)p | chan); + if (!adap->tid_release_task_busy) { + adap->tid_release_task_busy = true; + schedule_work(&adap->tid_release_task); + } + spin_unlock_bh(&adap->tid_release_lock); +} +EXPORT_SYMBOL(cxgb4_queue_tid_release); + +/* + * Process the list of pending TID release requests. 
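+ * Pending requests are chained through the tid_tab[] slots themselves, with
+ * the Tx channel encoded in the low two bits of each link pointer, so no
+ * additional allocation is needed to queue a request.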
+ */ +static void process_tid_release_list(struct work_struct *work) +{ + struct sk_buff *skb; + struct adapter *adap; + + adap = container_of(work, struct adapter, tid_release_task); + + spin_lock_bh(&adap->tid_release_lock); + while (adap->tid_release_head) { + void **p = adap->tid_release_head; + unsigned int chan = (uintptr_t)p & 3; + p = (void *)p - chan; + + adap->tid_release_head = *p; + *p = NULL; + spin_unlock_bh(&adap->tid_release_lock); + + while (!(skb = alloc_skb(sizeof(struct cpl_tid_release), + GFP_KERNEL))) + schedule_timeout_uninterruptible(1); + + mk_tid_release(skb, chan, p - adap->tids.tid_tab); + t4_ofld_send(adap, skb); + spin_lock_bh(&adap->tid_release_lock); + } + adap->tid_release_task_busy = false; + spin_unlock_bh(&adap->tid_release_lock); +} + +/* + * Release a TID and inform HW. If we are unable to allocate the release + * message we defer to a work queue. + */ +void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid) +{ + void *old; + struct sk_buff *skb; + struct adapter *adap = container_of(t, struct adapter, tids); + + old = t->tid_tab[tid]; + skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); + if (likely(skb)) { + t->tid_tab[tid] = NULL; + mk_tid_release(skb, chan, tid); + t4_ofld_send(adap, skb); + } else + cxgb4_queue_tid_release(t, chan, tid); + if (old) + atomic_dec(&t->tids_in_use); +} +EXPORT_SYMBOL(cxgb4_remove_tid); + +/* + * Allocate and initialize the TID tables. Returns 0 on success. + */ +static int tid_init(struct tid_info *t) +{ + size_t size; + unsigned int natids = t->natids; + + size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) + + t->nstids * sizeof(*t->stid_tab) + + BITS_TO_LONGS(t->nstids) * sizeof(long); + t->tid_tab = t4_alloc_mem(size); + if (!t->tid_tab) + return -ENOMEM; + + t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; + t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; + t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids]; + spin_lock_init(&t->stid_lock); + spin_lock_init(&t->atid_lock); + + t->stids_in_use = 0; + t->afree = NULL; + t->atids_in_use = 0; + atomic_set(&t->tids_in_use, 0); + + /* Setup the free list for atid_tab and clear the stid bitmap. */ + if (natids) { + while (--natids) + t->atid_tab[natids - 1].next = &t->atid_tab[natids]; + t->afree = t->atid_tab; + } + bitmap_zero(t->stid_bmap, t->nstids); + return 0; +} + +/** + * cxgb4_create_server - create an IP server + * @dev: the device + * @stid: the server TID + * @sip: local IP address to bind server to + * @sport: the server's TCP port + * @queue: queue to direct messages from this server to + * + * Create an IP server for the given port and address. + * Returns <0 on error and one of the %NET_XMIT_* values on success. 
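+ * The @stid is typically one allocated earlier with cxgb4_alloc_stid().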
+ */ +int cxgb4_create_server(const struct net_device *dev, unsigned int stid, + __be32 sip, __be16 sport, unsigned int queue) +{ + unsigned int chan; + struct sk_buff *skb; + struct adapter *adap; + struct cpl_pass_open_req *req; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + adap = netdev2adap(dev); + req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid)); + req->local_port = sport; + req->peer_port = htons(0); + req->local_ip = sip; + req->peer_ip = htonl(0); + chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; + req->opt0 = cpu_to_be64(TX_CHAN(chan)); + req->opt1 = cpu_to_be64(CONN_POLICY_ASK | + SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); + return t4_mgmt_tx(adap, skb); +} +EXPORT_SYMBOL(cxgb4_create_server); + +/** + * cxgb4_create_server6 - create an IPv6 server + * @dev: the device + * @stid: the server TID + * @sip: local IPv6 address to bind server to + * @sport: the server's TCP port + * @queue: queue to direct messages from this server to + * + * Create an IPv6 server for the given port and address. + * Returns <0 on error and one of the %NET_XMIT_* values on success. + */ +int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, + const struct in6_addr *sip, __be16 sport, + unsigned int queue) +{ + unsigned int chan; + struct sk_buff *skb; + struct adapter *adap; + struct cpl_pass_open_req6 *req; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + adap = netdev2adap(dev); + req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid)); + req->local_port = sport; + req->peer_port = htons(0); + req->local_ip_hi = *(__be64 *)(sip->s6_addr); + req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); + req->peer_ip_hi = cpu_to_be64(0); + req->peer_ip_lo = cpu_to_be64(0); + chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan; + req->opt0 = cpu_to_be64(TX_CHAN(chan)); + req->opt1 = cpu_to_be64(CONN_POLICY_ASK | + SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); + return t4_mgmt_tx(adap, skb); +} +EXPORT_SYMBOL(cxgb4_create_server6); + +/** + * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU + * @mtus: the HW MTU table + * @mtu: the target MTU + * @idx: index of selected entry in the MTU table + * + * Returns the index and the value in the HW MTU table that is closest to + * but does not exceed @mtu, unless @mtu is smaller than any value in the + * table, in which case that smallest available value is selected. + */ +unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, + unsigned int *idx) +{ + unsigned int i = 0; + + while (i < NMTUS - 1 && mtus[i + 1] <= mtu) + ++i; + if (idx) + *idx = i; + return mtus[i]; +} +EXPORT_SYMBOL(cxgb4_best_mtu); + +/** + * cxgb4_port_chan - get the HW channel of a port + * @dev: the net device for the port + * + * Return the HW Tx channel of the given port. + */ +unsigned int cxgb4_port_chan(const struct net_device *dev) +{ + return netdev2pinfo(dev)->tx_chan; +} +EXPORT_SYMBOL(cxgb4_port_chan); + +/** + * cxgb4_port_viid - get the VI id of a port + * @dev: the net device for the port + * + * Return the VI id of the given port. 
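+ * This is the virtual interface ID used when issuing firmware commands
+ * such as t4_set_rxmode() or t4_change_mac() for the port.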
+ */ +unsigned int cxgb4_port_viid(const struct net_device *dev) +{ + return netdev2pinfo(dev)->viid; +} +EXPORT_SYMBOL(cxgb4_port_viid); + +/** + * cxgb4_port_idx - get the index of a port + * @dev: the net device for the port + * + * Return the index of the given port. + */ +unsigned int cxgb4_port_idx(const struct net_device *dev) +{ + return netdev2pinfo(dev)->port_id; +} +EXPORT_SYMBOL(cxgb4_port_idx); + +/** + * cxgb4_netdev_by_hwid - return the net device of a HW port + * @pdev: identifies the adapter + * @id: the HW port id + * + * Return the net device associated with the interface with the given HW + * id. + */ +struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id) +{ + const struct adapter *adap = pci_get_drvdata(pdev); + + if (!adap || id >= NCHAN) + return NULL; + id = adap->chan_map[id]; + return id < MAX_NPORTS ? adap->port[id] : NULL; +} +EXPORT_SYMBOL(cxgb4_netdev_by_hwid); + +void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, + struct tp_tcp_stats *v6) +{ + struct adapter *adap = pci_get_drvdata(pdev); + + spin_lock(&adap->stats_lock); + t4_tp_get_tcp_stats(adap, v4, v6); + spin_unlock(&adap->stats_lock); +} +EXPORT_SYMBOL(cxgb4_get_tcp_stats); + +void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, + const unsigned int *pgsz_order) +{ + struct adapter *adap = netdev2adap(dev); + + t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask); + t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) | + HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) | + HPZ3(pgsz_order[3])); +} +EXPORT_SYMBOL(cxgb4_iscsi_init); + +static struct pci_driver cxgb4_driver; + +static void check_neigh_update(struct neighbour *neigh) +{ + const struct device *parent; + const struct net_device *netdev = neigh->dev; + + if (netdev->priv_flags & IFF_802_1Q_VLAN) + netdev = vlan_dev_real_dev(netdev); + parent = netdev->dev.parent; + if (parent && parent->driver == &cxgb4_driver.driver) + t4_l2t_update(dev_get_drvdata(parent), neigh); +} + +static int netevent_cb(struct notifier_block *nb, unsigned long event, + void *data) +{ + switch (event) { + case NETEVENT_NEIGH_UPDATE: + check_neigh_update(data); + break; + case NETEVENT_PMTU_UPDATE: + case NETEVENT_REDIRECT: + default: + break; + } + return 0; +} + +static bool netevent_registered; +static struct notifier_block cxgb4_netevent_nb = { + .notifier_call = netevent_cb +}; + +static void uld_attach(struct adapter *adap, unsigned int uld) +{ + void *handle; + struct cxgb4_lld_info lli; + + lli.pdev = adap->pdev; + lli.l2t = adap->l2t; + lli.tids = &adap->tids; + lli.ports = adap->port; + lli.vr = &adap->vres; + lli.mtus = adap->params.mtus; + if (uld == CXGB4_ULD_RDMA) { + lli.rxq_ids = adap->sge.rdma_rxq; + lli.nrxq = adap->sge.rdmaqs; + } else if (uld == CXGB4_ULD_ISCSI) { + lli.rxq_ids = adap->sge.ofld_rxq; + lli.nrxq = adap->sge.ofldqsets; + } + lli.ntxq = adap->sge.ofldqsets; + lli.nchan = adap->params.nports; + lli.nports = adap->params.nports; + lli.wr_cred = adap->params.ofldq_wr_cred; + lli.adapter_type = adap->params.rev; + lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); + lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( + t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF)); + lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( + t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF)); + lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); + lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); + lli.fw_vers = adap->params.fw_vers; + + handle = ulds[uld].add(&lli); + if (IS_ERR(handle)) { + 
dev_warn(adap->pdev_dev, + "could not attach to the %s driver, error %ld\n", + uld_str[uld], PTR_ERR(handle)); + return; + } + + adap->uld_handle[uld] = handle; + + if (!netevent_registered) { + register_netevent_notifier(&cxgb4_netevent_nb); + netevent_registered = true; + } +} + +static void attach_ulds(struct adapter *adap) +{ + unsigned int i; + + mutex_lock(&uld_mutex); + list_add_tail(&adap->list_node, &adapter_list); + for (i = 0; i < CXGB4_ULD_MAX; i++) + if (ulds[i].add) + uld_attach(adap, i); + mutex_unlock(&uld_mutex); +} + +static void detach_ulds(struct adapter *adap) +{ + unsigned int i; + + mutex_lock(&uld_mutex); + list_del(&adap->list_node); + for (i = 0; i < CXGB4_ULD_MAX; i++) + if (adap->uld_handle[i]) { + ulds[i].state_change(adap->uld_handle[i], + CXGB4_STATE_DETACH); + adap->uld_handle[i] = NULL; + } + if (netevent_registered && list_empty(&adapter_list)) { + unregister_netevent_notifier(&cxgb4_netevent_nb); + netevent_registered = false; + } + mutex_unlock(&uld_mutex); +} + +static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) +{ + unsigned int i; + + mutex_lock(&uld_mutex); + for (i = 0; i < CXGB4_ULD_MAX; i++) + if (adap->uld_handle[i]) + ulds[i].state_change(adap->uld_handle[i], new_state); + mutex_unlock(&uld_mutex); +} + +/** + * cxgb4_register_uld - register an upper-layer driver + * @type: the ULD type + * @p: the ULD methods + * + * Registers an upper-layer driver with this driver and notifies the ULD + * about any presently available devices that support its type. Returns + * %-EBUSY if a ULD of the same type is already registered. + */ +int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p) +{ + int ret = 0; + struct adapter *adap; + + if (type >= CXGB4_ULD_MAX) + return -EINVAL; + mutex_lock(&uld_mutex); + if (ulds[type].add) { + ret = -EBUSY; + goto out; + } + ulds[type] = *p; + list_for_each_entry(adap, &adapter_list, list_node) + uld_attach(adap, type); +out: mutex_unlock(&uld_mutex); + return ret; +} +EXPORT_SYMBOL(cxgb4_register_uld); + +/** + * cxgb4_unregister_uld - unregister an upper-layer driver + * @type: the ULD type + * + * Unregisters an existing upper-layer driver. + */ +int cxgb4_unregister_uld(enum cxgb4_uld type) +{ + struct adapter *adap; + + if (type >= CXGB4_ULD_MAX) + return -EINVAL; + mutex_lock(&uld_mutex); + list_for_each_entry(adap, &adapter_list, list_node) + adap->uld_handle[type] = NULL; + ulds[type].add = NULL; + mutex_unlock(&uld_mutex); + return 0; +} +EXPORT_SYMBOL(cxgb4_unregister_uld); + +/** + * cxgb_up - enable the adapter + * @adap: adapter being enabled + * + * Called when the first port is enabled, this function performs the + * actions necessary to make an adapter operational, such as completing + * the initialization of HW modules, and enabling interrupts. + * + * Must be called with the rtnl lock held. 
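+ *
+ * The first call also allocates the SGE queues and programs the RSS table
+ * (setting FULL_INIT_DONE); subsequent calls only re-request interrupts and
+ * re-enable the Rx path.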
+ */ +static int cxgb_up(struct adapter *adap) +{ + int err = 0; + + if (!(adap->flags & FULL_INIT_DONE)) { + err = setup_sge_queues(adap); + if (err) + goto out; + err = setup_rss(adap); + if (err) { + t4_free_sge_resources(adap); + goto out; + } + if (adap->flags & USING_MSIX) + name_msix_vecs(adap); + adap->flags |= FULL_INIT_DONE; + } + + if (adap->flags & USING_MSIX) { + err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, + adap->msix_info[0].desc, adap); + if (err) + goto irq_err; + + err = request_msix_queue_irqs(adap); + if (err) { + free_irq(adap->msix_info[0].vec, adap); + goto irq_err; + } + } else { + err = request_irq(adap->pdev->irq, t4_intr_handler(adap), + (adap->flags & USING_MSI) ? 0 : IRQF_SHARED, + adap->name, adap); + if (err) + goto irq_err; + } + enable_rx(adap); + t4_sge_start(adap); + t4_intr_enable(adap); + notify_ulds(adap, CXGB4_STATE_UP); + out: + return err; + irq_err: + dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); + goto out; +} + +static void cxgb_down(struct adapter *adapter) +{ + t4_intr_disable(adapter); + cancel_work_sync(&adapter->tid_release_task); + adapter->tid_release_task_busy = false; + + if (adapter->flags & USING_MSIX) { + free_msix_queue_irqs(adapter); + free_irq(adapter->msix_info[0].vec, adapter); + } else + free_irq(adapter->pdev->irq, adapter); + quiesce_rx(adapter); +} + +/* + * net_device operations + */ +static int cxgb_open(struct net_device *dev) +{ + int err; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) + return err; + + dev->real_num_tx_queues = pi->nqsets; + set_bit(pi->tx_chan, &adapter->open_device_map); + link_start(dev); + netif_tx_start_all_queues(dev); + return 0; +} + +static int cxgb_close(struct net_device *dev) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + ret = t4_enable_vi(adapter, 0, pi->viid, false, false); + + clear_bit(pi->tx_chan, &adapter->open_device_map); + + if (!adapter->open_device_map) + cxgb_down(adapter); + return 0; +} + +static struct net_device_stats *cxgb_get_stats(struct net_device *dev) +{ + struct port_stats stats; + struct port_info *p = netdev_priv(dev); + struct adapter *adapter = p->adapter; + struct net_device_stats *ns = &dev->stats; + + spin_lock(&adapter->stats_lock); + t4_get_port_stats(adapter, p->tx_chan, &stats); + spin_unlock(&adapter->stats_lock); + + ns->tx_bytes = stats.tx_octets; + ns->tx_packets = stats.tx_frames; + ns->rx_bytes = stats.rx_octets; + ns->rx_packets = stats.rx_frames; + ns->multicast = stats.rx_mcast_frames; + + /* detailed rx_errors */ + ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + + stats.rx_runt; + ns->rx_over_errors = 0; + ns->rx_crc_errors = stats.rx_fcs_err; + ns->rx_frame_errors = stats.rx_symbol_err; + ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 + + stats.rx_ovflow2 + stats.rx_ovflow3 + + stats.rx_trunc0 + stats.rx_trunc1 + + stats.rx_trunc2 + stats.rx_trunc3; + ns->rx_missed_errors = 0; + + /* detailed tx_errors */ + ns->tx_aborted_errors = 0; + ns->tx_carrier_errors = 0; + ns->tx_fifo_errors = 0; + ns->tx_heartbeat_errors = 0; + ns->tx_window_errors = 0; + + ns->tx_errors = stats.tx_error_frames; + ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + + ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; + return ns; +} + +static int cxgb_ioctl(struct net_device *dev, 
struct ifreq *req, int cmd) +{ + int ret = 0, prtad, devad; + struct port_info *pi = netdev_priv(dev); + struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; + + switch (cmd) { + case SIOCGMIIPHY: + if (pi->mdio_addr < 0) + return -EOPNOTSUPP; + data->phy_id = pi->mdio_addr; + break; + case SIOCGMIIREG: + case SIOCSMIIREG: + if (mdio_phy_id_is_c45(data->phy_id)) { + prtad = mdio_phy_id_prtad(data->phy_id); + devad = mdio_phy_id_devad(data->phy_id); + } else if (data->phy_id < 32) { + prtad = data->phy_id; + devad = 0; + data->reg_num &= 0x1f; + } else + return -EINVAL; + + if (cmd == SIOCGMIIREG) + ret = t4_mdio_rd(pi->adapter, 0, prtad, devad, + data->reg_num, &data->val_out); + else + ret = t4_mdio_wr(pi->adapter, 0, prtad, devad, + data->reg_num, data->val_in); + break; + default: + return -EOPNOTSUPP; + } + return ret; +} + +static void cxgb_set_rxmode(struct net_device *dev) +{ + /* unfortunately we can't return errors to the stack */ + set_rxmode(dev, -1, false); +} + +static int cxgb_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret; + struct port_info *pi = netdev_priv(dev); + + if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ + return -EINVAL; + ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, + true); + if (!ret) + dev->mtu = new_mtu; + return ret; +} + +static int cxgb_set_mac_addr(struct net_device *dev, void *p) +{ + int ret; + struct sockaddr *addr = p; + struct port_info *pi = netdev_priv(dev); + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt, + addr->sa_data, true, true); + if (ret < 0) + return ret; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + pi->xact_addr_filt = ret; + return 0; +} + +static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp) +{ + struct port_info *pi = netdev_priv(dev); + + pi->vlan_grp = grp; + t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cxgb_netpoll(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + + if (adap->flags & USING_MSIX) { + int i; + struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; + + for (i = pi->nqsets; i; i--, rx++) + t4_sge_intr_msix(0, &rx->rspq); + } else + t4_intr_handler(adap)(0, adap); +} +#endif + +static const struct net_device_ops cxgb4_netdev_ops = { + .ndo_open = cxgb_open, + .ndo_stop = cxgb_close, + .ndo_start_xmit = t4_eth_xmit, + .ndo_get_stats = cxgb_get_stats, + .ndo_set_rx_mode = cxgb_set_rxmode, + .ndo_set_mac_address = cxgb_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = cxgb_ioctl, + .ndo_change_mtu = cxgb_change_mtu, + .ndo_vlan_rx_register = vlan_rx_register, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = cxgb_netpoll, +#endif +}; + +void t4_fatal_err(struct adapter *adap) +{ + t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0); + t4_intr_disable(adap); + dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); +} + +static void setup_memwin(struct adapter *adap) +{ + u32 bar0; + + bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ + t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0), + (bar0 + MEMWIN0_BASE) | BIR(0) | + WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); + t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1), + (bar0 + MEMWIN1_BASE) | BIR(0) | + WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); + 
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), + (bar0 + MEMWIN2_BASE) | BIR(0) | + WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); +} + +/* + * Max # of ATIDs. The absolute HW max is 16K but we keep it lower. + */ +#define MAX_ATIDS 8192U + +/* + * Phase 0 of initialization: contact FW, obtain config, perform basic init. + */ +static int adap_init0(struct adapter *adap) +{ + int ret; + u32 v, port_vec; + enum dev_state state; + u32 params[7], val[7]; + struct fw_caps_config_cmd c; + + ret = t4_check_fw_version(adap); + if (ret == -EINVAL || ret > 0) { + if (upgrade_fw(adap) >= 0) /* recache FW version */ + ret = t4_check_fw_version(adap); + } + if (ret < 0) + return ret; + + /* contact FW, request master */ + ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state); + if (ret < 0) { + dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", + ret); + return ret; + } + + /* reset device */ + ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST); + if (ret < 0) + goto bye; + + /* get device capabilities */ + memset(&c, 0, sizeof(c)); + c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_READ); + c.retval_len16 = htonl(FW_LEN16(c)); + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c); + if (ret < 0) + goto bye; + + /* select capabilities we'll be using */ + if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { + if (!vf_acls) + c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); + else + c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM); + } else if (vf_acls) { + dev_err(adap->pdev_dev, "virtualization ACLs not supported"); + goto bye; + } + c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE); + ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL); + if (ret < 0) + goto bye; + + ret = t4_config_glbl_rss(adap, 0, + FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); + if (ret < 0) + goto bye; + + ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16, + FW_CMD_CAP_PF, FW_CMD_CAP_PF); + if (ret < 0) + goto bye; + + for (v = 0; v < SGE_NTIMERS - 1; v++) + adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL); + adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; + adap->sge.counter_val[0] = 1; + for (v = 1; v < SGE_NCOUNTERS; v++) + adap->sge.counter_val[v] = min(intr_cnt[v - 1], + THRESHOLD_3_MASK); + t4_sge_init(adap); + + /* get basic stuff going */ + ret = t4_early_init(adap, 0); + if (ret < 0) + goto bye; + +#define FW_PARAM_DEV(param) \ + (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + +#define FW_PARAM_PFVF(param) \ + (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) + + params[0] = FW_PARAM_DEV(PORTVEC); + params[1] = FW_PARAM_PFVF(L2T_START); + params[2] = FW_PARAM_PFVF(L2T_END); + params[3] = FW_PARAM_PFVF(FILTER_START); + params[4] = FW_PARAM_PFVF(FILTER_END); + ret = t4_query_params(adap, 0, 0, 0, 5, params, val); + if (ret < 0) + goto bye; + port_vec = val[0]; + adap->tids.ftid_base = val[3]; + adap->tids.nftids = val[4] - val[3] + 1; + + if (c.ofldcaps) { + /* query offload-related parameters */ + params[0] = FW_PARAM_DEV(NTID); + params[1] = FW_PARAM_PFVF(SERVER_START); + params[2] = FW_PARAM_PFVF(SERVER_END); + params[3] = FW_PARAM_PFVF(TDDP_START); + params[4] = FW_PARAM_PFVF(TDDP_END); + params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); + ret = t4_query_params(adap, 0, 0, 0, 6, params, val); + if (ret < 0) + goto bye; + adap->tids.ntids = val[0]; + adap->tids.natids = 
min(adap->tids.ntids / 2, MAX_ATIDS); + adap->tids.stid_base = val[1]; + adap->tids.nstids = val[2] - val[1] + 1; + adap->vres.ddp.start = val[3]; + adap->vres.ddp.size = val[4] - val[3] + 1; + adap->params.ofldq_wr_cred = val[5]; + adap->params.offload = 1; + } + if (c.rdmacaps) { + params[0] = FW_PARAM_PFVF(STAG_START); + params[1] = FW_PARAM_PFVF(STAG_END); + params[2] = FW_PARAM_PFVF(RQ_START); + params[3] = FW_PARAM_PFVF(RQ_END); + params[4] = FW_PARAM_PFVF(PBL_START); + params[5] = FW_PARAM_PFVF(PBL_END); + ret = t4_query_params(adap, 0, 0, 0, 6, params, val); + if (ret < 0) + goto bye; + adap->vres.stag.start = val[0]; + adap->vres.stag.size = val[1] - val[0] + 1; + adap->vres.rq.start = val[2]; + adap->vres.rq.size = val[3] - val[2] + 1; + adap->vres.pbl.start = val[4]; + adap->vres.pbl.size = val[5] - val[4] + 1; + } + if (c.iscsicaps) { + params[0] = FW_PARAM_PFVF(ISCSI_START); + params[1] = FW_PARAM_PFVF(ISCSI_END); + ret = t4_query_params(adap, 0, 0, 0, 2, params, val); + if (ret < 0) + goto bye; + adap->vres.iscsi.start = val[0]; + adap->vres.iscsi.size = val[1] - val[0] + 1; + } +#undef FW_PARAM_PFVF +#undef FW_PARAM_DEV + + adap->params.nports = hweight32(port_vec); + adap->params.portvec = port_vec; + adap->flags |= FW_OK; + + /* These are finalized by FW initialization, load their values now */ + v = t4_read_reg(adap, TP_TIMER_RESOLUTION); + adap->params.tp.tre = TIMERRESOLUTION_GET(v); + t4_read_mtu_tbl(adap, adap->params.mtus, NULL); + t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, + adap->params.b_wnd); + + /* tweak some settings */ + t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849); + t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12)); + t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG); + v = t4_read_reg(adap, TP_PIO_DATA); + t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); + setup_memwin(adap); + return 0; + + /* + * If a command timed out or failed with EIO FW does not operate within + * its spec or something catastrophic happened to HW/FW, stop issuing + * commands. + */ +bye: if (ret != -ETIMEDOUT && ret != -EIO) + t4_fw_bye(adap, 0); + return ret; +} + +static inline bool is_10g_port(const struct link_config *lc) +{ + return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; +} + +static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx, + unsigned int size, unsigned int iqe_size) +{ + q->intr_params = QINTR_TIMER_IDX(timer_idx) | + (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0); + q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0; + q->iqe_len = iqe_size; + q->size = size; +} + +/* + * Perform default configuration of DMA queues depending on the number and type + * of ports we found and the number of available CPUs. Most settings can be + * modified by the admin prior to actual use. + */ +static void __devinit cfg_queues(struct adapter *adap) +{ + struct sge *s = &adap->sge; + int i, q10g = 0, n10g = 0, qidx = 0; + + for_each_port(adap, i) + n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg); + + /* + * We default to 1 queue per non-10G port and up to # of cores queues + * per 10G port. + */ + if (n10g) + q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; + if (q10g > num_online_cpus()) + q10g = num_online_cpus(); + + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + pi->first_qset = qidx; + pi->nqsets = is_10g_port(&pi->link_cfg) ? 
q10g : 1; + qidx += pi->nqsets; + } + + s->ethqsets = qidx; + s->max_ethqsets = qidx; /* MSI-X may lower it later */ + + if (is_offload(adap)) { + /* + * For offload we use 1 queue/channel if all ports are up to 1G, + * otherwise we divide all available queues amongst the channels + * capped by the number of available cores. + */ + if (n10g) { + i = min_t(int, ARRAY_SIZE(s->ofldrxq), + num_online_cpus()); + s->ofldqsets = roundup(i, adap->params.nports); + } else + s->ofldqsets = adap->params.nports; + /* For RDMA one Rx queue per channel suffices */ + s->rdmaqs = adap->params.nports; + } + + for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { + struct sge_eth_rxq *r = &s->ethrxq[i]; + + init_rspq(&r->rspq, 0, 0, 1024, 64); + r->fl.size = 72; + } + + for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) + s->ethtxq[i].q.size = 1024; + + for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) + s->ctrlq[i].q.size = 512; + + for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) + s->ofldtxq[i].q.size = 1024; + + for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { + struct sge_ofld_rxq *r = &s->ofldrxq[i]; + + init_rspq(&r->rspq, 0, 0, 1024, 64); + r->rspq.uld = CXGB4_ULD_ISCSI; + r->fl.size = 72; + } + + for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) { + struct sge_ofld_rxq *r = &s->rdmarxq[i]; + + init_rspq(&r->rspq, 0, 0, 511, 64); + r->rspq.uld = CXGB4_ULD_RDMA; + r->fl.size = 72; + } + + init_rspq(&s->fw_evtq, 6, 0, 512, 64); + init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64); +} + +/* + * Reduce the number of Ethernet queues across all ports to at most n. + * n provides at least one queue per port. + */ +static void __devinit reduce_ethqs(struct adapter *adap, int n) +{ + int i; + struct port_info *pi; + + while (n < adap->sge.ethqsets) + for_each_port(adap, i) { + pi = adap2pinfo(adap, i); + if (pi->nqsets > 1) { + pi->nqsets--; + adap->sge.ethqsets--; + if (adap->sge.ethqsets <= n) + break; + } + } + + n = 0; + for_each_port(adap, i) { + pi = adap2pinfo(adap, i); + pi->first_qset = n; + n += pi->nqsets; + } +} + +/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ +#define EXTRA_VECS 2 + +static int __devinit enable_msix(struct adapter *adap) +{ + int ofld_need = 0; + int i, err, want, need; + struct sge *s = &adap->sge; + unsigned int nchan = adap->params.nports; + struct msix_entry entries[MAX_INGQ + 1]; + + for (i = 0; i < ARRAY_SIZE(entries); ++i) + entries[i].entry = i; + + want = s->max_ethqsets + EXTRA_VECS; + if (is_offload(adap)) { + want += s->rdmaqs + s->ofldqsets; + /* need nchan for each possible ULD */ + ofld_need = 2 * nchan; + } + need = adap->params.nports + EXTRA_VECS + ofld_need; + + while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need) + want = err; + + if (!err) { + /* + * Distribute available vectors to the various queue groups. + * Every group gets its minimum requirement and NIC gets top + * priority for leftovers. 
+ */ + i = want - EXTRA_VECS - ofld_need; + if (i < s->max_ethqsets) { + s->max_ethqsets = i; + if (i < s->ethqsets) + reduce_ethqs(adap, i); + } + if (is_offload(adap)) { + i = want - EXTRA_VECS - s->max_ethqsets; + i -= ofld_need - nchan; + s->ofldqsets = (i / nchan) * nchan; /* round down */ + } + for (i = 0; i < want; ++i) + adap->msix_info[i].vec = entries[i].vector; + } else if (err > 0) + dev_info(adap->pdev_dev, + "only %d MSI-X vectors left, not using MSI-X\n", err); + return err; +} + +#undef EXTRA_VECS + +static void __devinit print_port_info(struct adapter *adap) +{ + static const char *base[] = { + "R", "KX4", "T", "KX", "T", "KR", "CX4" + }; + + int i; + char buf[80]; + + for_each_port(adap, i) { + struct net_device *dev = adap->port[i]; + const struct port_info *pi = netdev_priv(dev); + char *bufp = buf; + + if (!test_bit(i, &adap->registered_device_map)) + continue; + + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) + bufp += sprintf(bufp, "100/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) + bufp += sprintf(bufp, "1000/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) + bufp += sprintf(bufp, "10G/"); + if (bufp != buf) + --bufp; + sprintf(bufp, "BASE-%s", base[pi->port_type]); + + netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n", + adap->params.vpd.id, adap->params.rev, + buf, is_offload(adap) ? "R" : "", + adap->params.pci.width, + (adap->flags & USING_MSIX) ? " MSI-X" : + (adap->flags & USING_MSI) ? " MSI" : ""); + if (adap->name == dev->name) + netdev_info(dev, "S/N: %s, E/C: %s\n", + adap->params.vpd.sn, adap->params.vpd.ec); + } +} + +#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\ + NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) + +static int __devinit init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int func, i, err; + struct port_info *pi; + unsigned int highdma = 0; + struct adapter *adapter = NULL; + + printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); + + err = pci_request_regions(pdev, KBUILD_MODNAME); + if (err) { + /* Just info, some other driver may have claimed the device. 
*/ + dev_info(&pdev->dev, "cannot obtain PCI resources\n"); + return err; + } + + /* We control everything through PF 0 */ + func = PCI_FUNC(pdev->devfn); + if (func > 0) + goto sriov; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto out_release_regions; + } + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + highdma = NETIF_F_HIGHDMA; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " + "coherent allocations\n"); + goto out_disable_device; + } + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto out_disable_device; + } + } + + pci_enable_pcie_error_reporting(pdev); + pci_set_master(pdev); + pci_save_state(pdev); + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) { + err = -ENOMEM; + goto out_disable_device; + } + + adapter->regs = pci_ioremap_bar(pdev, 0); + if (!adapter->regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + err = -ENOMEM; + goto out_free_adapter; + } + + adapter->pdev = pdev; + adapter->pdev_dev = &pdev->dev; + adapter->name = pci_name(pdev); + adapter->msg_enable = dflt_msg_enable; + memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); + + spin_lock_init(&adapter->stats_lock); + spin_lock_init(&adapter->tid_release_lock); + + INIT_WORK(&adapter->tid_release_task, process_tid_release_list); + + err = t4_prep_adapter(adapter); + if (err) + goto out_unmap_bar; + err = adap_init0(adapter); + if (err) + goto out_unmap_bar; + + for_each_port(adapter, i) { + struct net_device *netdev; + + netdev = alloc_etherdev_mq(sizeof(struct port_info), + MAX_ETH_QSETS); + if (!netdev) { + err = -ENOMEM; + goto out_free_dev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter->port[i] = netdev; + pi = netdev_priv(netdev); + pi->adapter = adapter; + pi->xact_addr_filt = -1; + pi->rx_offload = RX_CSO; + pi->port_id = i; + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + netdev->irq = pdev->irq; + + netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; + netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->features |= NETIF_F_GRO | highdma; + netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + netdev->vlan_features = netdev->features & VLAN_FEAT; + + netdev->netdev_ops = &cxgb4_netdev_ops; + SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); + } + + pci_set_drvdata(pdev, adapter); + + if (adapter->flags & FW_OK) { + err = t4_port_init(adapter, 0, 0, 0); + if (err) + goto out_free_dev; + } + + /* + * Configure queues and allocate tables now, they can be needed as + * soon as the first register_netdev completes. + */ + cfg_queues(adapter); + + adapter->l2t = t4_init_l2t(); + if (!adapter->l2t) { + /* We tolerate a lack of L2T, giving up some functionality */ + dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); + adapter->params.offload = 0; + } + + if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { + dev_warn(&pdev->dev, "could not allocate TID table, " + "continuing\n"); + adapter->params.offload = 0; + } + + /* + * The card is now ready to go. If any errors occur during device + * registration we do not fail the whole card but rather proceed only + * with the ports we manage to register successfully. However we must + * register at least one net device. 
+ */ + for_each_port(adapter, i) { + err = register_netdev(adapter->port[i]); + if (err) + dev_warn(&pdev->dev, + "cannot register net device %s, skipping\n", + adapter->port[i]->name); + else { + /* + * Change the name we use for messages to the name of + * the first successfully registered interface. + */ + if (!adapter->registered_device_map) + adapter->name = adapter->port[i]->name; + + __set_bit(i, &adapter->registered_device_map); + adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i; + } + } + if (!adapter->registered_device_map) { + dev_err(&pdev->dev, "could not register any net devices\n"); + goto out_free_dev; + } + + if (cxgb4_debugfs_root) { + adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), + cxgb4_debugfs_root); + setup_debugfs(adapter); + } + + /* See what interrupts we'll be using */ + if (msi > 1 && enable_msix(adapter) == 0) + adapter->flags |= USING_MSIX; + else if (msi > 0 && pci_enable_msi(pdev) == 0) + adapter->flags |= USING_MSI; + + if (is_offload(adapter)) + attach_ulds(adapter); + + print_port_info(adapter); + +sriov: +#ifdef CONFIG_PCI_IOV + if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) + if (pci_enable_sriov(pdev, num_vf[func]) == 0) + dev_info(&pdev->dev, + "instantiated %u virtual functions\n", + num_vf[func]); +#endif + return 0; + + out_free_dev: + t4_free_mem(adapter->tids.tid_tab); + t4_free_mem(adapter->l2t); + for_each_port(adapter, i) + if (adapter->port[i]) + free_netdev(adapter->port[i]); + if (adapter->flags & FW_OK) + t4_fw_bye(adapter, 0); + out_unmap_bar: + iounmap(adapter->regs); + out_free_adapter: + kfree(adapter); + out_disable_device: + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + out_release_regions: + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); + return err; +} + +static void __devexit remove_one(struct pci_dev *pdev) +{ + struct adapter *adapter = pci_get_drvdata(pdev); + + pci_disable_sriov(pdev); + + if (adapter) { + int i; + + if (is_offload(adapter)) + detach_ulds(adapter); + + for_each_port(adapter, i) + if (test_bit(i, &adapter->registered_device_map)) + unregister_netdev(adapter->port[i]); + + if (adapter->debugfs_root) + debugfs_remove_recursive(adapter->debugfs_root); + + t4_sge_stop(adapter); + t4_free_sge_resources(adapter); + t4_free_mem(adapter->l2t); + t4_free_mem(adapter->tids.tid_tab); + disable_msi(adapter); + + for_each_port(adapter, i) + if (adapter->port[i]) + free_netdev(adapter->port[i]); + + if (adapter->flags & FW_OK) + t4_fw_bye(adapter, 0); + iounmap(adapter->regs); + kfree(adapter); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); + } else if (PCI_FUNC(pdev->devfn) > 0) + pci_release_regions(pdev); +} + +static struct pci_driver cxgb4_driver = { + .name = KBUILD_MODNAME, + .id_table = cxgb4_pci_tbl, + .probe = init_one, + .remove = __devexit_p(remove_one), +}; + +static int __init cxgb4_init_module(void) +{ + int ret; + + /* Debugfs support is optional, just warn if this fails */ + cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!cxgb4_debugfs_root) + pr_warning("could not create debugfs entry, continuing\n"); + + ret = pci_register_driver(&cxgb4_driver); + if (ret < 0) + debugfs_remove(cxgb4_debugfs_root); + return ret; +} + +static void __exit cxgb4_cleanup_module(void) +{ + pci_unregister_driver(&cxgb4_driver); + debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ +} + +module_init(cxgb4_init_module); +module_exit(cxgb4_cleanup_module); -- cgit v1.2.3 
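The ULD (upper-layer driver) hooks introduced above -- cxgb4_register_uld(), cxgb4_unregister_uld(), and the attach/detach/notify paths that walk adapter_list under uld_mutex -- define the interface an offload module such as an iSCSI or RDMA driver would use. Because registration and adapter probing both go through uld_mutex, load order does not matter: a ULD registered before any adapter is probed is attached from attach_ulds(), and one registered later is attached by cxgb4_register_uld() walking adapter_list. The sketch below shows how a ULD might plug into this interface. It is a minimal illustration, not code from the patch: the exact .add and .state_change callback signatures, the cxgb4_uld.h header name, and struct cxgb4_lld_info are assumptions inferred from how the callbacks are invoked here, and every my_* name is hypothetical.

  #include <linux/module.h>
  #include <linux/slab.h>
  #include <linux/err.h>
  #include "cxgb4_uld.h"          /* assumed header exporting the ULD API */

  /* Hypothetical per-adapter state kept by the upper-layer driver. */
  struct my_uld_dev {
          const struct cxgb4_lld_info *lli;   /* lower-layer info (assumed type) */
  };

  /* Called once per adapter; the returned pointer is stored by cxgb4 in
   * adap->uld_handle[] and handed back to .state_change later. */
  static void *my_uld_add(const struct cxgb4_lld_info *lli)
  {
          struct my_uld_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

          if (!d)
                  return ERR_PTR(-ENOMEM);    /* cxgb4 logs PTR_ERR() on failure */
          d->lli = lli;
          return d;
  }

  /* Notifications delivered by notify_ulds()/detach_ulds() above. */
  static int my_uld_state_change(void *handle, enum cxgb4_state state)
  {
          struct my_uld_dev *d = handle;

          switch (state) {
          case CXGB4_STATE_UP:            /* adapter queues and interrupts are live */
                  break;
          case CXGB4_STATE_DETACH:        /* drop every reference to the adapter */
                  kfree(d);
                  break;
          default:
                  break;
          }
          return 0;
  }

  static const struct cxgb4_uld_info my_uld_info = {
          .add          = my_uld_add,
          .state_change = my_uld_state_change,
  };

  static int __init my_uld_init(void)
  {
          /* Returns -EBUSY if a ULD of this type is already registered. */
          return cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
  }

  static void __exit my_uld_exit(void)
  {
          cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
  }

  module_init(my_uld_init);
  module_exit(my_uld_exit);
  MODULE_LICENSE("GPL");
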
From 43e9da8d782b8a40d5127fcc59ac2e543cf16d7d Mon Sep 17 00:00:00 2001 From: Dimitris Michailidis Date: Thu, 1 Apr 2010 15:28:27 +0000 Subject: net: Hook up cxgb4 to Kconfig and Makefile Signed-off-by: Dimitris Michailidis Signed-off-by: David S. Miller --- drivers/net/Kconfig | 25 +++++++++++++++++++++++++ drivers/net/Makefile | 1 + 2 files changed, 26 insertions(+) diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 0ba5b8e50a7c..7b832c727f87 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2582,6 +2582,31 @@ config CHELSIO_T3 To compile this driver as a module, choose M here: the module will be called cxgb3. +config CHELSIO_T4_DEPENDS + tristate + depends on PCI && INET + default y + +config CHELSIO_T4 + tristate "Chelsio Communications T4 Ethernet support" + depends on CHELSIO_T4_DEPENDS + select FW_LOADER + select MDIO + help + This driver supports Chelsio T4-based gigabit and 10Gb Ethernet + adapters. + + For general information about Chelsio and our products, visit + our website at . + + For customer support, please visit our customer support page at + . + + Please send feedback to . + + To compile this driver as a module choose M here; the module + will be called cxgb4. + config EHEA tristate "eHEA Ethernet support" depends on IBMEBUS && INET && SPARSEMEM diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 478886234c28..a583b50d9de8 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_IXGB) += ixgb/ obj-$(CONFIG_IP1000) += ipg.o obj-$(CONFIG_CHELSIO_T1) += chelsio/ obj-$(CONFIG_CHELSIO_T3) += cxgb3/ +obj-$(CONFIG_CHELSIO_T4) += cxgb4/ obj-$(CONFIG_EHEA) += ehea/ obj-$(CONFIG_CAN) += can/ obj-$(CONFIG_BONDING) += bonding/ -- cgit v1.2.3 From 93f4d91d879acfcb0ba9c2725e3133fcff2dfd1e Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Thu, 1 Apr 2010 07:30:07 +0000 Subject: r8169: clean up my printk uglyness Fix formatting on r8169 printk Brandon Philips noted that I had a spacing issue in my printk for the last r8169 patch that made it quite ugly. Fix that up and add the PFX macro to it as well so it looks like the other r8169 printks Signed-off-by: Neil Horman Signed-off-by: David S. Miller --- drivers/net/r8169.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 96740051cdcc..dbb1f5a1824c 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -3227,8 +3227,8 @@ static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; if (max_frame != 16383) - printk(KERN_WARNING "WARNING! Changing of MTU on this NIC" - "May lead to frame reception errors!\n"); + printk(KERN_WARNING PFX "WARNING! Changing of MTU on this " + "NIC may lead to frame reception errors!\n"); tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE; } -- cgit v1.2.3 From fb9e2d887243499b8d28efcf80821c4f6a092395 Mon Sep 17 00:00:00 2001 From: Ken Kawasaki Date: Sat, 3 Apr 2010 15:07:10 -0700 Subject: smc91c92_cs: fix the problem of "Unable to find hardware address" smc91c92_cs: *cvt_ascii_address returns 0, if success. *call free_netdev, if we can't find hardware address. Signed-off-by: Ken Kawasaki Signed-off-by: David S. 
Miller --- drivers/net/pcmcia/smc91c92_cs.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 5adc662c4bfb..ff7eb9116b6a 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c @@ -493,13 +493,14 @@ static int pcmcia_get_versmac(struct pcmcia_device *p_dev, { struct net_device *dev = priv; cisparse_t parse; + u8 *buf; if (pcmcia_parse_tuple(tuple, &parse)) return -EINVAL; - if ((parse.version_1.ns > 3) && - (cvt_ascii_address(dev, - (parse.version_1.str + parse.version_1.ofs[3])))) + buf = parse.version_1.str + parse.version_1.ofs[3]; + + if ((parse.version_1.ns > 3) && (cvt_ascii_address(dev, buf) == 0)) return 0; return -EINVAL; @@ -528,7 +529,7 @@ static int mhz_setup(struct pcmcia_device *link) len = pcmcia_get_tuple(link, 0x81, &buf); if (buf && len >= 13) { buf[12] = '\0'; - if (cvt_ascii_address(dev, buf)) + if (cvt_ascii_address(dev, buf) == 0) rc = 0; } kfree(buf); @@ -910,7 +911,7 @@ static int smc91c92_config(struct pcmcia_device *link) if (i != 0) { printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n"); - goto config_undo; + goto config_failed; } smc->duplex = 0; @@ -998,6 +999,7 @@ config_undo: unregister_netdev(dev); config_failed: smc91c92_release(link); + free_netdev(dev); return -ENODEV; } /* smc91c92_config */ -- cgit v1.2.3
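
The smc91c92_cs fix above corrects two related details of the config error path: cvt_ascii_address() follows the usual kernel convention of returning 0 on success, so callers must treat a zero return as a match rather than a failure, and a probe that has not yet called register_netdev() must bail out through the label that skips unregister_netdev() while still releasing the socket and freeing the net_device it allocated. A generic sketch of that unwind ordering follows; apart from the register/unregister/free_netdev calls taken from the diff, the example_* helpers and label layout are illustrative placeholders, not code from the driver.

  #include <linux/netdevice.h>
  #include <pcmcia/ds.h>

  /* Placeholders for driver-specific steps (not from smc91c92_cs). */
  static int example_find_hw_address(struct net_device *dev);  /* 0 on success */
  static int example_late_setup(struct net_device *dev);
  static void example_release(struct pcmcia_device *link);

  /* Each unwind label undoes strictly less than the one above it, and the
   * net_device allocated by the probe routine is freed on every failure. */
  static int example_config(struct pcmcia_device *link, struct net_device *dev)
  {
          if (example_find_hw_address(dev) != 0)
                  goto config_failed;             /* nothing registered yet */

          if (register_netdev(dev) != 0)
                  goto config_failed;

          if (example_late_setup(dev) != 0)
                  goto config_undo;               /* netdev already registered */

          return 0;

  config_undo:
          unregister_netdev(dev);
  config_failed:
          example_release(link);
          free_netdev(dev);                       /* the leak the patch plugs */
          return -ENODEV;
  }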