/*
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef __MTD_CFI_H__
#define __MTD_CFI_H__

#include <linux/delay.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>
#include <linux/mtd/xip.h>

#ifdef CONFIG_MTD_CFI_I1
#define cfi_interleave(cfi) 1
#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
#else
#define cfi_interleave_is_1(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I2
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 2
# endif
#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
#else
#define cfi_interleave_is_2(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I4
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 4
# endif
#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
#else
#define cfi_interleave_is_4(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I8
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 8
# endif
#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
#else
#define cfi_interleave_is_8(cfi) (0)
#endif

#ifndef cfi_interleave
#warning No CONFIG_MTD_CFI_Ix selected. No NOR chip support can work.
static inline int cfi_interleave(void *cfi)
{
	BUG();
	return 0;
}
#endif

static inline int cfi_interleave_supported(int i)
{
	switch (i) {
#ifdef CONFIG_MTD_CFI_I1
	case 1:
#endif
#ifdef CONFIG_MTD_CFI_I2
	case 2:
#endif
#ifdef CONFIG_MTD_CFI_I4
	case 4:
#endif
#ifdef CONFIG_MTD_CFI_I8
	case 8:
#endif
		return 1;

	default:
		return 0;
	}
}


/* NB: these values must represent the number of bytes needed to meet the
 * device type (x8, x16, x32). E.g. a 32-bit device is 4 bytes (4 x 8 bits) wide.
 * These numbers are used in calculations.
 */
#define CFI_DEVICETYPE_X8  (8 / 8)
#define CFI_DEVICETYPE_X16 (16 / 8)
#define CFI_DEVICETYPE_X32 (32 / 8)
#define CFI_DEVICETYPE_X64 (64 / 8)


/* Device Interface Code Assignments from the "Common Flash Memory Interface
 * Publication 100" dated December 1, 2001.
 */
#define CFI_INTERFACE_X8_ASYNC		0x0000
#define CFI_INTERFACE_X16_ASYNC		0x0001
#define CFI_INTERFACE_X8_BY_X16_ASYNC	0x0002
#define CFI_INTERFACE_X32_ASYNC	0x0003
#define CFI_INTERFACE_X16_BY_X32_ASYNC	0x0005
#define CFI_INTERFACE_NOT_ALLOWED	0xffff


/* NB: We keep these structures in memory in HOST byteorder, except
 * where individually noted.
 */

/* Basic Query Structure */
struct cfi_ident {
	uint8_t  qry[3];
	uint16_t P_ID;
	uint16_t P_ADR;
	uint16_t A_ID;
	uint16_t A_ADR;
	uint8_t  VccMin;
	uint8_t  VccMax;
	uint8_t  VppMin;
	uint8_t  VppMax;
	uint8_t  WordWriteTimeoutTyp;
	uint8_t  BufWriteTimeoutTyp;
	uint8_t  BlockEraseTimeoutTyp;
	uint8_t  ChipEraseTimeoutTyp;
	uint8_t  WordWriteTimeoutMax;
	uint8_t  BufWriteTimeoutMax;
	uint8_t  BlockEraseTimeoutMax;
	uint8_t  ChipEraseTimeoutMax;
	uint8_t  DevSize;
	uint16_t InterfaceDesc;
	uint16_t MaxBufWriteSize;
	uint8_t  NumEraseRegions;
	uint32_t EraseRegionInfo[0]; /* Not host ordered */
} __packed;

/* Extended Query Structure for both PRI and ALT */

struct cfi_extquery {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
} __packed;

/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */

struct cfi_pri_intelext {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
	uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
				    block follows - FIXME - not currently supported */
	uint8_t  SuspendCmdSupport;
	uint16_t BlkStatusRegMask;
	uint8_t  VccOptimal;
	uint8_t  VppOptimal;
	uint8_t  NumProtectionFields;
	uint16_t ProtRegAddr;
	uint8_t  FactProtRegSize;
	uint8_t  UserProtRegSize;
	uint8_t  extra[0];
} __packed;

struct cfi_intelext_otpinfo {
	uint32_t ProtRegAddr;
	uint16_t FactGroups;
	uint8_t  FactProtRegSize;
	uint16_t UserGroups;
	uint8_t  UserProtRegSize;
} __packed;

struct cfi_intelext_blockinfo {
	uint16_t NumIdentBlocks;
	uint16_t BlockSize;
	uint16_t MinBlockEraseCycles;
	uint8_t  BitsPerCell;
	uint8_t  BlockCap;
} __packed;

struct cfi_intelext_regioninfo {
	uint16_t NumIdentPartitions;
	uint8_t  NumOpAllowed;
	uint8_t  NumOpAllowedSimProgMode;
	uint8_t  NumOpAllowedSimEraMode;
	uint8_t  NumBlockTypes;
	struct cfi_intelext_blockinfo BlockTypes[1];
} __packed;

struct cfi_intelext_programming_regioninfo {
	uint8_t  ProgRegShift;
	uint8_t  Reserved1;
	uint8_t  ControlValid;
	uint8_t  Reserved2;
	uint8_t  ControlInvalid;
	uint8_t  Reserved3;
} __packed;

/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */

struct cfi_pri_amdstd {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
	uint8_t  SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
	uint8_t  EraseSuspend;
	uint8_t  BlkProt;
	uint8_t  TmpBlkUnprotect;
	uint8_t  BlkProtUnprot;
	uint8_t  SimultaneousOps;
	uint8_t  BurstMode;
	uint8_t  PageMode;
	uint8_t  VppMin;
	uint8_t  VppMax;
	uint8_t  TopBottom;
} __packed;

/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */

struct cfi_pri_atmel {
	uint8_t pri[3];
	uint8_t MajorVersion;
	uint8_t MinorVersion;
	uint8_t Features;
	uint8_t BottomBoot;
	uint8_t BurstMode;
	uint8_t PageMode;
} __packed;

struct cfi_pri_query {
	uint8_t  NumFields;
	uint32_t ProtField[1]; /* Not host ordered */
} __packed;

struct cfi_bri_query {
	uint8_t  PageModeReadCap;
	uint8_t  NumFields;
	uint32_t ConfField[1]; /* Not host ordered */
} __packed;

#define P_ID_NONE		0x0000
#define P_ID_INTEL_EXT		0x0001
#define P_ID_AMD_STD		0x0002
#define P_ID_INTEL_STD		0x0003
#define P_ID_AMD_EXT		0x0004
#define P_ID_WINBOND		0x0006
#define P_ID_ST_ADV		0x0020
#define P_ID_MITSUBISHI_STD	0x0100
#define P_ID_MITSUBISHI_EXT	0x0101
#define P_ID_SST_PAGE		0x0102
#define P_ID_SST_OLD		0x0701
#define P_ID_INTEL_PERFORMANCE	0x0200
#define P_ID_INTEL_DATA		0x0210
#define P_ID_RESERVED		0xffff


#define CFI_MODE_CFI	1
#define CFI_MODE_JEDEC	0

struct cfi_private {
	uint16_t cmdset;
	void *cmdset_priv;
	int interleave;
	int device_type;
	int cfi_mode;		/* Are we a JEDEC device pretending to be CFI? */
	int addr_unlock1;
	int addr_unlock2;
	struct mtd_info *(*cmdset_setup)(struct map_info *);
	struct cfi_ident *cfiq; /* For now only one. We insist that all devs
				   must be of the same type. */
	int mfr, id;
	int numchips;
	map_word sector_erase_cmd;
	unsigned long chipshift; /* Because they're of the same type */
	const char *im_name;	 /* inter_module name for cmdset_setup */
	struct flchip chips[0];  /* per-chip data structure for each chip */
};

/*
 * Returns the command address according to the given geometry.
 */
static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
				struct map_info *map, struct cfi_private *cfi)
{
	unsigned bankwidth = map_bankwidth(map);
	unsigned interleave = cfi_interleave(cfi);
	unsigned type = cfi->device_type;
	uint32_t addr;

	addr = (cmd_ofs * type) * interleave;

	/* Modify the unlock address if we are in compatibility mode.
	 * For 16-bit devices on 8-bit busses
	 * and 32-bit devices on 16-bit busses,
	 * set the low bit of the alternating bit sequence of the address.
	 */
	if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
		addr |= (type >> 1) * interleave;

	return addr;
}
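
/*
 * Worked example (illustrative only, derived from the arithmetic above): a
 * single x16 device driven through an 8-bit bus (type = 2, interleave = 1,
 * bankwidth = 1) maps the AMD-style unlock offsets as
 *
 *	cfi_build_cmd_addr(0x555, ...) = 0x555 * 2            = 0xAAA
 *	cfi_build_cmd_addr(0x2AA, ...) = 0x2AA * 2 | (2 >> 1) = 0x555
 *
 * i.e. only the offset ending in 0xAA picks up the compatibility fix-up,
 * giving the 0xAAA/0x555 pair such chips expect in byte mode.
 */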

/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away.
 */
static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
	map_word val = { {0} };
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onecmd;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* First, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	default: BUG();
	case 1:
		onecmd = cmd;
		break;
	case 2:
		onecmd = cpu_to_cfi16(map, cmd);
		break;
	case 4:
		onecmd = cpu_to_cfi32(map, cmd);
		break;
	}

	/* Now replicate it across the size of an unsigned long, or
	   just to the bus width as appropriate */
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		onecmd |= (onecmd << (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		onecmd |= (onecmd << (chip_mode * 16));
		/* fall through */
	case 2:
		onecmd |= (onecmd << (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* And finally, for the multi-word case, replicate it
	   in all words in the structure */
	for (i = 0; i < words_per_bus; i++) {
		val.x[i] = onecmd;
	}

	return val;
}
#define CMD(x) cfi_build_cmd((x), map, cfi)
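
/*
 * Worked example (illustrative only): two x16 chips interleaved on a 32-bit
 * bus give chip_mode = 2 and chips_per_word = 2, so with the default no-swap
 * CFI endian setting CMD(0xF0) builds the map_word 0x00F000F0 - the command
 * replicated once per chip across the bus width.
 */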

static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
					     struct cfi_private *cfi)
{
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onestat, res = 0;
	int i;

	/* We do it this way to give the compiler a fighting chance
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}

	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	onestat = val.x[0];
	/* Or all status words together */
	for (i = 1; i < words_per_bus; i++) {
		onestat |= val.x[i];
	}

	res = onestat;
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		res |= (onestat >> (chip_mode * 32));
		/* fall through */
#endif
	case 4:
		res |= (onestat >> (chip_mode * 16));
		/* fall through */
	case 2:
		res |= (onestat >> (chip_mode * 8));
		/* fall through */
	case 1:
		;
	}

	/* Last, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	case 1:
		break;
	case 2:
		res = cfi16_to_cpu(map, res);
		break;
	case 4:
		res = cfi32_to_cpu(map, res);
		break;
	default: BUG();
	}
	return res;
}

#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)


/*
 * Sends a CFI command to a bank of flash for the given geometry.
 *
 * Returns the offset in flash where the command was written.
 * If prev_val is non-null, it will be set to the value at the command address,
 * before the command was written.
 */
static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
				struct map_info *map, struct cfi_private *cfi,
				int type, map_word *prev_val)
{
	map_word val;
	uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
	val = cfi_build_cmd(cmd, map, cfi);

	if (prev_val)
		*prev_val = map_read(map, addr);

	map_write(map, val, addr);

	return addr - base;
}
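
/*
 * Typical use (a sketch modelled on the AMD/Fujitsu command-set driver;
 * 'chip' is the struct flchip being addressed and locking/error handling is
 * omitted): a word program is issued as the classic three-cycle unlock
 * sequence
 *
 *	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
 *			 cfi->device_type, NULL);
 *	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
 *			 cfi->device_type, NULL);
 *	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi,
 *			 cfi->device_type, NULL);
 *
 * followed by a map_write() of the data word itself.
 */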

static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
	map_word val = map_read(map, addr);

	if (map_bankwidth_is_1(map)) {
		return val.x[0];
	} else if (map_bankwidth_is_2(map)) {
		return cfi16_to_cpu(map, val.x[0]);
	} else {
		/* No point in a 64-bit byteswap since that would just be
		   swapping the responses from different chips, and we are
		   only interested in one chip (a representative sample) */
		return cfi32_to_cpu(map, val.x[0]);
	}
}

static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
{
	map_word val = map_read(map, addr);

	if (map_bankwidth_is_1(map)) {
		return val.x[0] & 0xff;
	} else if (map_bankwidth_is_2(map)) {
		return cfi16_to_cpu(map, val.x[0]);
	} else {
		/* No point in a 64-bit byteswap since that would just be
		   swapping the responses from different chips, and we are
		   only interested in one chip (a representative sample) */
		return cfi32_to_cpu(map, val.x[0]);
	}
}

static inline void cfi_udelay(int us)
{
	if (us >= 1000) {
		msleep((us + 999) / 1000);
	} else {
		udelay(us);
		cond_resched();
	}
}

int __xipram cfi_qry_present(struct map_info *map, __u32 base,
			     struct cfi_private *cfi);
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
			     struct cfi_private *cfi);
void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
			       struct cfi_private *cfi);

struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
				  const char *name);

struct cfi_fixup {
	uint16_t mfr;
	uint16_t id;
	void (*fixup)(struct mtd_info *mtd);
};

#define CFI_MFR_ANY		0xFFFF
#define CFI_ID_ANY		0xFFFF
#define CFI_MFR_CONTINUATION	0x007F

#define CFI_MFR_AMD		0x0001
#define CFI_MFR_AMIC		0x0037
#define CFI_MFR_ATMEL		0x001F
#define CFI_MFR_EON		0x001C
#define CFI_MFR_FUJITSU		0x0004
#define CFI_MFR_HYUNDAI		0x00AD
#define CFI_MFR_INTEL		0x0089
#define CFI_MFR_MACRONIX	0x00C2
#define CFI_MFR_NEC		0x0010
#define CFI_MFR_PMC		0x009D
#define CFI_MFR_SAMSUNG		0x00EC
#define CFI_MFR_SHARP		0x00B0
#define CFI_MFR_SST		0x00BF
#define CFI_MFR_ST		0x0020	/* STMicroelectronics */
#define CFI_MFR_TOSHIBA		0x0098
#define CFI_MFR_WINBOND		0x00DA

void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups);
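
/*
 * Example fixup table (a sketch only; fixup_use_example() is a hypothetical
 * callback, not part of this header). Command-set drivers collect per-chip
 * quirks in a NULL-terminated array of struct cfi_fixup and apply it after
 * probing:
 *
 *	static struct cfi_fixup example_fixup_table[] = {
 *		{ CFI_MFR_SST, CFI_ID_ANY, fixup_use_example },
 *		{ 0, 0, NULL }
 *	};
 *
 *	cfi_fixup(mtd, example_fixup_table);
 *
 * Entries match on manufacturer and device ID, with CFI_MFR_ANY/CFI_ID_ANY
 * acting as wildcards.
 */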

typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
			      unsigned long adr, int len, void *thunk);

int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
		     loff_t ofs, size_t len, void *thunk);


#endif /* __MTD_CFI_H__ */